Bluetooth: Fix error handling for HCI socket options
net/bluetooth/hci_sock.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

#include <linux/export.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>

static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

static inline int hci_test_bit(int nr, void *addr)
{
	return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

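/*
 * The security filter below is a set of bitmaps: one bit per HCI event
 * code in event_mask[] and one bit per OCF in ocf_mask[], indexed by OGF.
 * hci_test_bit() treats a mask as an array of 32-bit words, so event
 * code 0x05 (Disconnection Complete) is bit 5 of word 0, while event
 * code 0x2f (= 47) would be bit 15 of word 1.
 */
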
/* Security filter */
static struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};

static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

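/*
 * Decide whether a frame must be withheld from a raw socket.  Returning
 * true means "filtered out": either the packet type is not enabled in
 * the socket's type_mask, or an event's code (and, for Command Complete
 * and Command Status, its opcode) does not match the socket's filter.
 */
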
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
		flt_type = 0;
	else
		flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_RAW)
			continue;

		if (is_filtered_packet(sk, skb))
			continue;

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}

/* Send frame to control socket */
void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("len %d", skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}

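/*
 * Frames copied to the monitor channel are prefixed with a struct
 * hci_mon_hdr carrying a direction-specific opcode, the controller
 * index and the payload length, so a single monitor socket can trace
 * traffic from all controllers at the same time.
 */
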
/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = __constant_cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = __constant_cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = __constant_cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = __constant_cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = __constant_cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = __constant_cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	default:
		return;
	}

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
			continue;

		if (!skb_copy) {
			struct hci_mon_hdr *hdr;

			/* Create a private copy with headroom */
			skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE,
					       GFP_ATOMIC);
			if (!skb_copy)
				continue;

			/* Put header before the data */
			hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
			hdr->opcode = opcode;
			hdr->index = cpu_to_le16(hdev->id);
			hdr->len = cpu_to_le16(skb->len);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}

static void send_monitor_event(struct sk_buff *skb)
{
	struct sock *sk;

	BT_DBG("len %d", skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}

static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = __constant_cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = __constant_cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

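/*
 * Replay an HCI_MON_NEW_INDEX event for every controller that is already
 * registered, so a freshly bound monitor socket starts with a complete
 * view of the existing device indexes.
 */
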
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_dev_list_lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	skb->dev = (void *) hdev;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}

void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	struct hci_ev_si_device ev;

	BT_DBG("hdev %s event %d", hdev->name, event);

	/* Send event to monitor */
	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, event);
		if (skb) {
			send_monitor_event(skb);
			kfree_skb(skb);
		}
	}

	/* Send event to sockets */
	ev.event = event;
	ev.dev_id = hdev->id;
	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}

static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}

static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_blacklist_add(hdev, &bdaddr, 0);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_blacklist_del(hdev, &bdaddr, 0);

	hci_dev_unlock(hdev);

	return err;
}

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			return -EPERM;

		if (arg)
			set_bit(HCI_RAW, &hdev->flags);
		else
			clear_bit(HCI_RAW, &hdev->flags);

		return 0;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);

	default:
		if (hdev->ioctl)
			return hdev->ioctl(hdev, cmd, arg);
		return -EINVAL;
	}
}

static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	struct sock *sk = sock->sk;
	void __user *argp = (void __user *) arg;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);

	default:
		lock_sock(sk);
		err = hci_sock_bound_ioctl(sk, cmd, arg);
		release_sock(sk);
		return err;
	}
}

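/*
 * An HCI socket binds to one of three channels: HCI_CHANNEL_RAW
 * (optionally tied to a single controller), HCI_CHANNEL_CONTROL (the
 * management interface, CAP_NET_ADMIN only) and HCI_CHANNEL_MONITOR
 * (read-only tracing, CAP_NET_RAW only).
 */
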
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_CONTROL:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		err = -EINVAL;
		goto done;
	}

	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int *addr_len, int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	*addr_len = sizeof(*haddr);
	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;

done:
	release_sock(sk);
	return err;
}

static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}

static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	msg->msg_namelen = 0;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_CONTROL:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}

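/*
 * On the raw channel every frame written by userspace starts with a
 * one-byte packet type indicator; it is stripped off here and stored in
 * bt_cb(skb)->pkt_type before the frame is queued towards the driver.
 * Frames delivered to userspace carry the same leading type byte (see
 * hci_send_to_sock() above).
 */
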
static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		break;
	case HCI_CHANNEL_CONTROL:
		err = mgmt_control(sk, msg, len);
		goto done;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	default:
		err = -EINVAL;
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		err = -EFAULT;
		goto drop;
	}

	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);
	skb->dev = (void *) hdev;

	if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->req.start = true;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}

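/*
 * Socket options are only valid on the raw channel.  For HCI_FILTER, a
 * caller without CAP_NET_RAW gets its requested masks ANDed with
 * hci_sec_filter, so it can narrow, but never widen, the set of packet
 * types and events it is allowed to receive.
 */
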
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

994
8fc9ced3
GP
995static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
996 char __user *optval, int __user *optlen)
1da177e4
LT
997{
998 struct hci_ufilter uf;
999 struct sock *sk = sock->sk;
cedc5469
MH
1000 int len, opt, err = 0;
1001
1002 BT_DBG("sk %p, opt %d", sk, optname);
1da177e4
LT
1003
1004 if (get_user(len, optlen))
1005 return -EFAULT;
1006
cedc5469
MH
1007 lock_sock(sk);
1008
1009 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
c2371e80 1010 err = -EBADFD;
cedc5469
MH
1011 goto done;
1012 }
1013
1da177e4
LT
1014 switch (optname) {
1015 case HCI_DATA_DIR:
1016 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1017 opt = 1;
8e87d142 1018 else
1da177e4
LT
1019 opt = 0;
1020
1021 if (put_user(opt, optval))
cedc5469 1022 err = -EFAULT;
1da177e4
LT
1023 break;
1024
1025 case HCI_TIME_STAMP:
1026 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1027 opt = 1;
8e87d142 1028 else
1da177e4
LT
1029 opt = 0;
1030
1031 if (put_user(opt, optval))
cedc5469 1032 err = -EFAULT;
1da177e4
LT
1033 break;
1034
1035 case HCI_FILTER:
1036 {
1037 struct hci_filter *f = &hci_pi(sk)->filter;
1038
e15ca9a0 1039 memset(&uf, 0, sizeof(uf));
1da177e4
LT
1040 uf.type_mask = f->type_mask;
1041 uf.opcode = f->opcode;
1042 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1043 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1044 }
1045
1046 len = min_t(unsigned int, len, sizeof(uf));
1047 if (copy_to_user(optval, &uf, len))
cedc5469 1048 err = -EFAULT;
1da177e4
LT
1049 break;
1050
1051 default:
cedc5469 1052 err = -ENOPROTOOPT;
1da177e4
LT
1053 break;
1054 }
1055
cedc5469
MH
1056done:
1057 release_sock(sk);
1058 return err;
1da177e4
LT
1059}
1060
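/*
 * Illustrative userspace sketch (not part of this file; it assumes the
 * usual BlueZ/uapi definitions of sockaddr_hci and the channel
 * constants):
 *
 *	struct sockaddr_hci a = { .hci_family  = AF_BLUETOOTH,
 *				  .hci_dev     = 0,
 *				  .hci_channel = HCI_CHANNEL_RAW };
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	bind(fd, (struct sockaddr *) &a, sizeof(a));
 *
 * Such a socket ends up in hci_sock_create() and hci_sock_bind() in
 * this file.
 */
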
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};

static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};

int __init hci_sock_init(void)
{
	int err;

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}

void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}