Bluetooth: Introduce trusted flag for management control sockets
[deliverable/linux.git] net/bluetooth/hci_sock.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

#include <linux/export.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>

static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u32             cmsg_mask;
	unsigned short    channel;
	unsigned long     flags;
};

void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}

void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}
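Only the set and clear helpers appear in this file; hci_send_to_flagged_channel() below open-codes the corresponding test_bit() on hci_pi(sk)->flags. A minimal sketch of a matching query helper, assuming the name hci_sock_test_flag (not part of this listing):

static inline int hci_sock_test_flag(struct sock *sk, int nr)
{
	/* Same per-socket flags word that hci_sock_set_flag() and
	 * hci_sock_clear_flag() manipulate above.
	 */
	return test_bit(nr, &hci_pi(sk)->flags);
}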
static inline int hci_test_bit(int nr, const void *addr)
{
	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

/* Security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};

static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
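Each ocf_mask row is selected by OGF (opcode group) and then treated as a 128-bit bitmap indexed by OCF (opcode command); that is how hci_sock_sendmsg() below decides whether an unprivileged raw socket may send a given command. A sketch of that check, assuming the standard hci_opcode_ogf()/hci_opcode_ocf() helpers from hci.h; the function name raw_cmd_permitted is illustrative. For example, Read Local Version Information has opcode 0x1001 (OGF 0x04, OCF 0x0001), and bit 1 of ocf_mask[4][0] == 0x000002be is set, so it is allowed without CAP_NET_RAW.

static bool raw_cmd_permitted(__u16 opcode)
{
	__u16 ogf = hci_opcode_ogf(opcode);	/* opcode >> 10 */
	__u16 ocf = hci_opcode_ocf(opcode);	/* opcode & 0x03ff */

	if (ogf > HCI_SFLT_MAX_OGF)
		return false;

	/* Each ocf_mask[ogf] row is four __u32 words, i.e. a 128-bit map */
	return hci_test_bit(ocf & HCI_FLT_OCF_BITS,
			    &hci_sec_filter.ocf_mask[ogf]);
}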
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
		flt_type = 0;
	else
		flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}
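is_filtered_packet() evaluates the per-socket filter that a raw-channel client installs through the HCI_FILTER socket option (handled in hci_sock_setsockopt() below). A userspace sketch, assuming the BlueZ <bluetooth/hci.h> and <bluetooth/hci_lib.h> helpers, that asks for HCI events only, limited to Command Complete and Command Status:

#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>
#include <bluetooth/hci_lib.h>

static int set_event_filter(int sk)
{
	struct hci_filter flt;

	hci_filter_clear(&flt);
	hci_filter_set_ptype(HCI_EVENT_PKT, &flt);
	hci_filter_set_event(EVT_CMD_COMPLETE, &flt);
	hci_filter_set_event(EVT_CMD_STATUS, &flt);

	/* Lands in the HCI_FILTER case of hci_sock_setsockopt() */
	return setsockopt(sk, SOL_HCI, HCI_FILTER, &flt, sizeof(flt));
}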
/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			if (!bt_cb(skb)->incoming)
				continue;
			if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}

/* Send frame to sockets with specific channel */
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}

/* Send frame to sockets with specific channel flag set */
void hci_send_to_flagged_channel(unsigned short channel, struct sk_buff *skb,
				 int flag)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (!test_bit(flag, &hci_pi(sk)->flags))
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}
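hci_send_to_flagged_channel() is the consumer of the new per-socket flags: an event source can restrict delivery to sockets that were marked HCI_SOCK_TRUSTED at bind time. A sketch of such a caller; the helper name and the mgmt_hdr framing are assumptions about how the management core would use it, not code from this listing:

static void send_trusted_mgmt_event(u16 event, u16 index, void *data, u16 len)
{
	struct mgmt_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(sizeof(*hdr) + len, GFP_KERNEL);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, sizeof(*hdr));
	hdr->opcode = cpu_to_le16(event);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(len);

	memcpy(skb_put(skb, len), data, len);

	/* Delivered only to control-channel sockets carrying the flag */
	hci_send_to_flagged_channel(HCI_CHANNEL_CONTROL, skb,
				    HCI_SOCK_TRUSTED);
	kfree_skb(skb);
}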
/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy, NULL);
	kfree_skb(skb_copy);
}

static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_dev_list_lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}

void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	struct hci_ev_si_device ev;

	BT_DBG("hdev %s event %d", hdev->name, event);

	/* Send event to monitor */
	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, NULL);
			kfree_skb(skb);
		}
	}

	/* Send event to sockets */
	ev.event = event;
	ev.dev_id = hdev->id;
	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}

static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	list_for_each_entry(c, &mgmt_chan_list, list) {
		if (c->channel == channel)
			return c;
	}

	return NULL;
}

static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	mutex_lock(&mgmt_chan_list_lock);
	c = __hci_mgmt_chan_find(channel);
	mutex_unlock(&mgmt_chan_list_lock);

	return c;
}

int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
	if (c->channel < HCI_CHANNEL_CONTROL)
		return -EINVAL;

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);
		return -EALREADY;
	}

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);

void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);
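hci_mgmt_chan_register() rejects channel numbers below HCI_CHANNEL_CONTROL with -EINVAL and duplicate registrations with -EALREADY. A sketch of a registrant; only the channel and list fields appear in this listing (the real struct in hci_core.h also carries the command-handler table), and the in-kernel management core is expected to claim HCI_CHANNEL_CONTROL itself, so the channel value here is purely illustrative:

static struct hci_mgmt_chan example_chan = {
	.channel = HCI_CHANNEL_CONTROL,	/* illustrative; normally a new number */
};

static int __init example_chan_init(void)
{
	return hci_mgmt_chan_register(&example_chan);
}

static void __exit example_chan_exit(void)
{
	hci_mgmt_chan_unregister(&example_chan);
}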
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			mgmt_index_added(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			hci_dev_close(hdev->id);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}

static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_BREDR)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);
	}

	return -ENOIOCTLCMD;
}

static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *) arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}

static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		if (test_bit(HCI_UP, &hdev->flags) ||
		    test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG)) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
			hci_dev_put(hdev);
			goto done;
		}

		atomic_inc(&hdev->promisc);

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		/* Since the access to control channels is currently
		 * restricted to CAP_NET_ADMIN capabilities, every
		 * socket is implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 */
		if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
		}
		break;
	}

	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
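From userspace, hci_sock_bind() is driven by struct sockaddr_hci (hci_family, hci_dev, hci_channel). A sketch, assuming the BlueZ headers, that binds a raw-channel socket to one device; binding HCI_CHANNEL_USER or HCI_CHANNEL_MONITOR follows the same shape but is gated on CAP_NET_ADMIN or CAP_NET_RAW as enforced above:

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

static int open_hci_raw(int dev_id)
{
	struct sockaddr_hci addr;
	int sk;

	sk = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);
	if (sk < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.hci_family = AF_BLUETOOTH;
	addr.hci_dev = dev_id;		/* HCI_DEV_NONE for an unbound socket */
	addr.hci_channel = HCI_CHANNEL_RAW;

	if (bind(sk, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
		close(sk);
		return -1;
	}

	return sk;
}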
static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int *addr_len, int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	*addr_len = sizeof(*haddr);
	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;

done:
	release_sock(sk);
	return err;
}

static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}

static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
			    int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}

static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	default:
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = mgmt_control(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
		    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->req_start = 1;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
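On the raw and user channels, the first byte of every write is the packet-type indicator that hci_sock_sendmsg() strips into bt_cb(skb)->pkt_type; for commands the little-endian opcode and parameter length follow. A userspace sketch sending Read Local Version Information (opcode 0x1001, no parameters), which the security filter above permits even without CAP_NET_RAW:

#include <unistd.h>

static int send_read_local_version(int sk)
{
	/* 0x01 = HCI command packet, then opcode 0x1001 (LE) and plen 0 */
	static const unsigned char cmd[] = { 0x01, 0x01, 0x10, 0x00 };

	return write(sk, cmd, sizeof(cmd)) == (ssize_t) sizeof(cmd) ? 0 : -1;
}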
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};

static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};

int __init hci_sock_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}

void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}