Bluetooth: Remove most of the inline usage
net/bluetooth/hci_sock.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/compat.h>
#include <linux/socket.h>
#include <linux/ioctl.h>
#include <net/sock.h>

#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>

static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

static inline int hci_test_bit(int nr, void *addr)
{
	return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

/* Security filter */
static struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
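
/*
 * Worked example (illustrative, not part of the original file): the event
 * mask above is read by hci_test_bit() as an array of 32-bit words, so
 * event number nr lives in word (nr >> 5), bit (nr & 31).  For the
 * Command Complete event (HCI_EV_CMD_COMPLETE == 0x0e) that is word 0,
 * bit 14, and 0x1000d9fe has bit 14 set:
 *
 *	hci_test_bit(HCI_EV_CMD_COMPLETE, &hci_sec_filter.event_mask)
 *		== 0x1000d9fe & (1 << 14)	-> non-zero, event allowed
 *
 * Events whose bit is clear are filtered out for sockets that lack
 * CAP_NET_RAW.
 */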

static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct hlist_node *node;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, node, &hci_sk_list.head) {
		struct hci_filter *flt;
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_RAW)
			continue;

		/* Apply filter */
		flt = &hci_pi(sk)->filter;

		if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ?
			      0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS),
			      &flt->type_mask))
			continue;

		if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) {
			register int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

			if (!hci_test_bit(evt, &flt->event_mask))
				continue;

			if (flt->opcode &&
			    ((evt == HCI_EV_CMD_COMPLETE &&
			      flt->opcode !=
			      get_unaligned((__le16 *)(skb->data + 3))) ||
			     (evt == HCI_EV_CMD_STATUS &&
			      flt->opcode !=
			      get_unaligned((__le16 *)(skb->data + 4)))))
				continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}

/* Send frame to control socket */
void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
{
	struct sock *sk;
	struct hlist_node *node;

	BT_DBG("len %d", skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, node, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}

/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct hlist_node *node;
	struct sk_buff *skb_copy = NULL;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = __constant_cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = __constant_cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = __constant_cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = __constant_cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = __constant_cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = __constant_cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	default:
		return;
	}

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, node, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
			continue;

		if (!skb_copy) {
			struct hci_mon_hdr *hdr;

			/* Create a private copy with headroom */
			skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC);
			if (!skb_copy)
				continue;

			/* Put header before the data */
			hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
			hdr->opcode = opcode;
			hdr->index = cpu_to_le16(hdev->id);
			hdr->len = cpu_to_le16(skb->len);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}

static void send_monitor_event(struct sk_buff *skb)
{
	struct sock *sk;
	struct hlist_node *node;

	BT_DBG("len %d", skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, node, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}

static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = __constant_cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = __constant_cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_dev_list_lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	skb->dev = (void *) hdev;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}

void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	struct hci_ev_si_device ev;

	BT_DBG("hdev %s event %d", hdev->name, event);

	/* Send event to monitor */
	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, event);
		if (skb) {
			send_monitor_event(skb);
			kfree_skb(skb);
		}
	}

	/* Send event to sockets */
	ev.event = event;
	ev.dev_id = hdev->id;
	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;
		struct hlist_node *node;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, node, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}

static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}

static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_blacklist_add(hdev, &bdaddr, 0);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_blacklist_del(hdev, &bdaddr, 0);

	hci_dev_unlock(hdev);

	return err;
}

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;

		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			return -EPERM;

		if (arg)
			set_bit(HCI_RAW, &hdev->flags);
		else
			clear_bit(HCI_RAW, &hdev->flags);

		return 0;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);

	default:
		if (hdev->ioctl)
			return hdev->ioctl(hdev, cmd, arg);
		return -EINVAL;
	}
}

static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	void __user *argp = (void __user *) arg;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);

	default:
		lock_sock(sk);
		err = hci_sock_bound_ioctl(sk, cmd, arg);
		release_sock(sk);
		return err;
	}
}
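
/*
 * Usage sketch (user space, illustrative only; not part of this file):
 * the device ioctls above are what tools such as hciconfig issue against
 * a raw HCI socket.  Bringing up device index 0 looks roughly like this,
 * assuming the usual AF_BLUETOOTH/BTPROTO_HCI and HCIDEVUP definitions
 * from the user-space Bluetooth headers:
 *
 *	int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	if (ctl < 0)
 *		return -1;
 *	if (ioctl(ctl, HCIDEVUP, 0) < 0)	// arg is the hci0 device index
 *		perror("HCIDEVUP");		// requires CAP_NET_ADMIN
 *	close(ctl);
 */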

static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_CONTROL:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		err = -EINVAL;
		goto done;
	}

	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
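
/*
 * Usage sketch (user space, illustrative only): binding to the monitor
 * channel is how btmon-style tracers attach.  hci_dev must be
 * HCI_DEV_NONE and the caller needs CAP_NET_RAW; on success the kernel
 * replays one HCI_MON_NEW_INDEX record per registered controller via
 * send_monitor_replay() above.
 *
 *	struct sockaddr_hci addr = {
 *		.hci_family  = AF_BLUETOOTH,
 *		.hci_dev     = HCI_DEV_NONE,
 *		.hci_channel = HCI_CHANNEL_MONITOR,
 *	};
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	if (bind(fd, (struct sockaddr *) &addr, sizeof(addr)) < 0)
 *		perror("bind");		// EPERM without CAP_NET_RAW
 */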

static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!hdev)
		return -EBADFD;

	lock_sock(sk);

	*addr_len = sizeof(*haddr);
	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev = hdev->id;

	release_sock(sk);
	return 0;
}

static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming), &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}

static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	msg->msg_namelen = 0;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_CONTROL:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}

static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		break;
	case HCI_CHANNEL_CONTROL:
		err = mgmt_control(sk, msg, len);
		goto done;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	default:
		err = -EINVAL;
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		err = -EFAULT;
		goto drop;
	}

	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);
	skb->dev = (void *) hdev;

	if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
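
/*
 * Usage sketch (user space, illustrative only): frames written to a raw
 * socket must carry the packet type as their first byte, which
 * hci_sock_sendmsg() above strips into bt_cb(skb)->pkt_type before
 * queueing.  An HCI_Reset command (OGF 0x03, OCF 0x0003, no parameters)
 * would be sent roughly like this:
 *
 *	unsigned char cmd[4];
 *	cmd[0] = HCI_COMMAND_PKT;	// packet type byte
 *	cmd[1] = 0x03;			// opcode LSB (OCF 0x0003)
 *	cmd[2] = 0x0c;			// opcode MSB (OGF 0x03 << 2)
 *	cmd[3] = 0x00;			// parameter length
 *	write(fd, cmd, sizeof(cmd));	// fd bound to HCI_CHANNEL_RAW
 *
 * Whether the command is accepted without CAP_NET_RAW depends on the
 * OGF/OCF bitmap in hci_sec_filter checked above.
 */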

static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EINVAL;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
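
/*
 * Usage sketch (user space, illustrative only): HCI_FILTER takes a
 * struct hci_ufilter and, for callers without CAP_NET_RAW, is silently
 * intersected with hci_sec_filter above.  Requesting only event packets
 * and the Command Complete event could look like this:
 *
 *	struct hci_ufilter uf;
 *	memset(&uf, 0, sizeof(uf));
 *	uf.type_mask     = 1 << HCI_EVENT_PKT;		// 0x10
 *	uf.event_mask[0] = 1 << HCI_EV_CMD_COMPLETE;	// bit 14 of word 0
 *	if (setsockopt(fd, SOL_HCI, HCI_FILTER, &uf, sizeof(uf)) < 0)
 *		perror("setsockopt");
 */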

static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EINVAL;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};

static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};

int __init hci_sock_init(void)
{
	int err;

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0)
		goto error;

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	BT_ERR("HCI socket registration failed");
	proto_unregister(&hci_sk_proto);
	return err;
}

void hci_sock_cleanup(void)
{
	if (bt_sock_unregister(BTPROTO_HCI) < 0)
		BT_ERR("HCI socket unregistration failed");

	proto_unregister(&hci_sk_proto);
}