Bluetooth: Declare the hci_sec_filter as const
net/bluetooth/hci_sock.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

#include <linux/export.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>

static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

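/* Test bit nr in an array of 32-bit words: nr >> 5 selects the word and
 * nr & 31 the bit within it. For example, nr = 37 tests bit 5 of the
 * second word. The filter masks below follow the same layout.
 */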
static inline int hci_test_bit(int nr, void *addr)
{
	return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

/* Security filter */
#define HCI_SFLT_MAX_OGF	5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

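/* Packets, events and commands that raw sockets without CAP_NET_RAW may
 * use: bit n of event_mask covers HCI event code n, and bit n of
 * ocf_mask[ogf] covers OCF n within that opcode group. The type_mask of
 * 0x10 admits only HCI event packets (packet type 4).
 */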
static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};

static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

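/* Return true when the per-socket filter rejects this packet. For event
 * packets an optional opcode match is applied: in a Command Complete
 * event the opcode sits at offset 3 (event header plus the
 * Num_HCI_Command_Packets byte), in a Command Status event at offset 4
 * (one additional status byte).
 */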
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
		flt_type = 0;
	else
		flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			if (!bt_cb(skb)->incoming)
				continue;
			if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}

/* Send frame to control socket */
void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("len %d", skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}

/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	default:
		return;
	}

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
			continue;

		if (!skb_copy) {
			struct hci_mon_hdr *hdr;

			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE,
						      GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put header before the data */
			hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
			hdr->opcode = opcode;
			hdr->index = cpu_to_le16(hdev->id);
			hdr->len = cpu_to_le16(skb->len);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}

static void send_monitor_event(struct sk_buff *skb)
{
	struct sock *sk;

	BT_DBG("len %d", skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}

static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

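/* Replay an HCI_MON_NEW_INDEX event for every controller that is already
 * registered, so a freshly bound monitor socket learns about existing
 * devices before live traffic starts flowing.
 */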
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_dev_list_lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}

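/* Broadcast a device state change (register, unregister, up, down) both
 * to monitor sockets and, as an HCI_EV_STACK_INTERNAL event, to raw
 * sockets. On HCI_DEV_UNREG every socket still bound to the device is
 * detached from it.
 */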
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	struct hci_ev_si_device ev;

	BT_DBG("hdev %s event %d", hdev->name, event);

	/* Send event to monitor */
	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, event);
		if (skb) {
			send_monitor_event(skb);
			kfree_skb(skb);
		}
	}

	/* Send event to sockets */
	ev.event = event;
	ev.dev_id = hdev->id;
	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}

static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			mgmt_index_added(hdev);
			clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
			hci_dev_close(hdev->id);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}

static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_BREDR)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);
	}

	return -ENOIOCTLCMD;
}

static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *) arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}

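/* Bind a socket to one of the HCI channels. RAW may bind to a specific
 * device or to HCI_DEV_NONE; USER takes exclusive control of one device
 * (CAP_NET_ADMIN); CONTROL (CAP_NET_ADMIN) and MONITOR (CAP_NET_RAW) are
 * device-independent.
 */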
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		if (test_bit(HCI_UP, &hdev->flags) ||
		    test_bit(HCI_INIT, &hdev->flags) ||
		    test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_CONFIG, &hdev->dev_flags)) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (test_and_set_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
			mgmt_index_added(hdev);
			hci_dev_put(hdev);
			goto done;
		}

		atomic_inc(&hdev->promisc);

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_CONTROL:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		err = -EINVAL;
		goto done;
	}

	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int *addr_len, int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	*addr_len = sizeof(*haddr);
	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;

done:
	release_sock(sk);
	return err;
}

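/* Attach ancillary data to a received message: packet direction when
 * HCI_CMSG_DIR is enabled and a (possibly compat-sized) timestamp when
 * HCI_CMSG_TSTAMP is enabled. Both are toggled via setsockopt() below.
 */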
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}

static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}

static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_CONTROL:
		err = mgmt_control(sk, msg, len);
		goto done;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	default:
		err = -EINVAL;
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		err = -EFAULT;
		goto drop;
	}

	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
		    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

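		/* A 16-bit HCI opcode packs OGF in the top 6 bits and OCF
		 * in the low 10: for opcode 0x0401 (Inquiry), ogf is
		 * 0x0401 >> 10 = 1 and ocf is 0x0401 & 0x3ff = 1, so bit 1
		 * of hci_sec_filter.ocf_mask[1] decides whether a socket
		 * without CAP_NET_RAW may send it.
		 */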
		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

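		/* OGF 0x3f is the vendor-specific command group; such
		 * commands bypass the command queue and go out raw.
		 */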
		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->req.start = true;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}

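/* HCI_FILTER illustration (user space, a sketch only; it assumes the
 * BlueZ library helpers from hci_lib.h, which are not defined here):
 * a process without CAP_NET_RAW may still request a filter, but the
 * masks it asks for are intersected with hci_sec_filter above.
 *
 *	struct hci_filter flt;
 *
 *	hci_filter_clear(&flt);
 *	hci_filter_set_ptype(HCI_EVENT_PKT, &flt);
 *	hci_filter_set_event(EVT_CMD_COMPLETE, &flt);
 *	if (setsockopt(fd, SOL_HCI, HCI_FILTER, &flt, sizeof(flt)) < 0)
 *		perror("setsockopt");
 */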
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};

static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};

int __init hci_sock_init(void)
{
	int err;

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}

void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}