Bluetooth: Add hdev_init callback for HCI channels
net/bluetooth/hci_sock.c

/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

#include <linux/export.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>

static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
        struct bt_sock    bt;
        struct hci_dev    *hdev;
        struct hci_filter filter;
        __u32             cmsg_mask;
        unsigned short    channel;
        unsigned long     flags;
};

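/* Helpers for the per-socket flag bits kept in hci_pinfo->flags
 * (e.g. HCI_SOCK_TRUSTED). They are used by the channel binding and
 * broadcast code below.
 */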
void hci_sock_set_flag(struct sock *sk, int nr)
{
        set_bit(nr, &hci_pi(sk)->flags);
}

void hci_sock_clear_flag(struct sock *sk, int nr)
{
        clear_bit(nr, &hci_pi(sk)->flags);
}

int hci_sock_test_flag(struct sock *sk, int nr)
{
        return test_bit(nr, &hci_pi(sk)->flags);
}

unsigned short hci_sock_get_channel(struct sock *sk)
{
        return hci_pi(sk)->channel;
}

static inline int hci_test_bit(int nr, const void *addr)
{
        return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

/* Security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
        __u32 type_mask;
        __u32 event_mask[2];
        __u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

static const struct hci_sec_filter hci_sec_filter = {
        /* Packet types */
        0x10,
        /* Events */
        { 0x1000d9fe, 0x0000b00c },
        /* Commands */
        {
                { 0x0 },
                /* OGF_LINK_CTL */
                { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
                /* OGF_LINK_POLICY */
                { 0x00005200, 0x00000000, 0x00000000, 0x00 },
                /* OGF_HOST_CTL */
                { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
                /* OGF_INFO_PARAM */
                { 0x000002be, 0x00000000, 0x00000000, 0x00 },
                /* OGF_STATUS_PARAM */
                { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
        }
};

static struct bt_sock_list hci_sk_list = {
        .lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

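/* Decide whether a packet must be dropped for a given raw socket.
 * Returns true when the socket's filter rejects the packet type, the
 * event code or, for Command Complete/Status events, the opcode.
 */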
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
        struct hci_filter *flt;
        int flt_type, flt_event;

        /* Apply filter */
        flt = &hci_pi(sk)->filter;

        if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
                flt_type = 0;
        else
                flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;

        if (!test_bit(flt_type, &flt->type_mask))
                return true;

        /* Extra filter for event packets only */
        if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
                return false;

        flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

        if (!hci_test_bit(flt_event, &flt->event_mask))
                return true;

        /* Check filter only when opcode is set */
        if (!flt->opcode)
                return false;

        if (flt_event == HCI_EV_CMD_COMPLETE &&
            flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
                return true;

        if (flt_event == HCI_EV_CMD_STATUS &&
            flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
                return true;

        return false;
}

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct sock *sk;
        struct sk_buff *skb_copy = NULL;

        BT_DBG("hdev %p len %d", hdev, skb->len);

        read_lock(&hci_sk_list.lock);

        sk_for_each(sk, &hci_sk_list.head) {
                struct sk_buff *nskb;

                if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
                        continue;

                /* Don't send frame to the socket it came from */
                if (skb->sk == sk)
                        continue;

                if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
                        if (is_filtered_packet(sk, skb))
                                continue;
                } else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
                        if (!bt_cb(skb)->incoming)
                                continue;
                        if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
                            bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
                            bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
                                continue;
                } else {
                        /* Don't send frame to other channel types */
                        continue;
                }

                if (!skb_copy) {
                        /* Create a private copy with headroom */
                        skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
                        if (!skb_copy)
                                continue;

                        /* Put type byte before the data */
                        memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
                }

                nskb = skb_clone(skb_copy, GFP_ATOMIC);
                if (!nskb)
                        continue;

                if (sock_queue_rcv_skb(sk, nskb))
                        kfree_skb(nskb);
        }

        read_unlock(&hci_sk_list.lock);

        kfree_skb(skb_copy);
}

/* Send frame to sockets with specific channel */
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
                         int flag, struct sock *skip_sk)
{
        struct sock *sk;

        BT_DBG("channel %u len %d", channel, skb->len);

        read_lock(&hci_sk_list.lock);

        sk_for_each(sk, &hci_sk_list.head) {
                struct sk_buff *nskb;

                /* Ignore socket without the flag set */
                if (!hci_sock_test_flag(sk, flag))
                        continue;

                /* Skip the original socket */
                if (sk == skip_sk)
                        continue;

                if (sk->sk_state != BT_BOUND)
                        continue;

                if (hci_pi(sk)->channel != channel)
                        continue;

                nskb = skb_clone(skb, GFP_ATOMIC);
                if (!nskb)
                        continue;

                if (sock_queue_rcv_skb(sk, nskb))
                        kfree_skb(nskb);
        }

        read_unlock(&hci_sk_list.lock);
}

/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct sk_buff *skb_copy = NULL;
        struct hci_mon_hdr *hdr;
        __le16 opcode;

        if (!atomic_read(&monitor_promisc))
                return;

        BT_DBG("hdev %p len %d", hdev, skb->len);

        switch (bt_cb(skb)->pkt_type) {
        case HCI_COMMAND_PKT:
                opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
                break;
        case HCI_EVENT_PKT:
                opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
                break;
        case HCI_ACLDATA_PKT:
                if (bt_cb(skb)->incoming)
                        opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
                else
                        opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
                break;
        case HCI_SCODATA_PKT:
                if (bt_cb(skb)->incoming)
                        opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
                else
                        opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
                break;
        default:
                return;
        }

        /* Create a private copy with headroom */
        skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
        if (!skb_copy)
                return;

        /* Put header before the data */
        hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
        hdr->opcode = opcode;
        hdr->index = cpu_to_le16(hdev->id);
        hdr->len = cpu_to_le16(skb->len);

        hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
                            HCI_SOCK_TRUSTED, NULL);
        kfree_skb(skb_copy);
}

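/* Build a monitor channel frame announcing controller registration
 * (HCI_MON_NEW_INDEX) or removal (HCI_MON_DEL_INDEX).
 */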
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
        struct hci_mon_hdr *hdr;
        struct hci_mon_new_index *ni;
        struct sk_buff *skb;
        __le16 opcode;

        switch (event) {
        case HCI_DEV_REG:
                skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
                if (!skb)
                        return NULL;

                ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
                ni->type = hdev->dev_type;
                ni->bus = hdev->bus;
                bacpy(&ni->bdaddr, &hdev->bdaddr);
                memcpy(ni->name, hdev->name, 8);

                opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
                break;

        case HCI_DEV_UNREG:
                skb = bt_skb_alloc(0, GFP_ATOMIC);
                if (!skb)
                        return NULL;

                opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
                break;

        default:
                return NULL;
        }

        __net_timestamp(skb);

        hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
        hdr->opcode = opcode;
        hdr->index = cpu_to_le16(hdev->id);
        hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

        return skb;
}

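/* Replay a NEW_INDEX event for every registered controller so that a
 * freshly bound monitor socket learns about already existing devices.
 */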
static void send_monitor_replay(struct sock *sk)
{
        struct hci_dev *hdev;

        read_lock(&hci_dev_list_lock);

        list_for_each_entry(hdev, &hci_dev_list, list) {
                struct sk_buff *skb;

                skb = create_monitor_event(hdev, HCI_DEV_REG);
                if (!skb)
                        continue;

                if (sock_queue_rcv_skb(sk, skb))
                        kfree_skb(skb);
        }

        read_unlock(&hci_dev_list_lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
        struct hci_event_hdr *hdr;
        struct hci_ev_stack_internal *ev;
        struct sk_buff *skb;

        skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
        if (!skb)
                return;

        hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
        hdr->evt = HCI_EV_STACK_INTERNAL;
        hdr->plen = sizeof(*ev) + dlen;

        ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
        ev->type = type;
        memcpy(ev->data, data, dlen);

        bt_cb(skb)->incoming = 1;
        __net_timestamp(skb);

        bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
        hci_send_to_sock(hdev, skb);
        kfree_skb(skb);
}

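/* Notify interested sockets about a device event: forward it to the
 * monitor channel, emit an HCI_EV_SI_DEVICE stack-internal event and,
 * on HCI_DEV_UNREG, detach every socket still bound to the device.
 */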
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
        struct hci_ev_si_device ev;

        BT_DBG("hdev %s event %d", hdev->name, event);

        /* Send event to monitor */
        if (atomic_read(&monitor_promisc)) {
                struct sk_buff *skb;

                skb = create_monitor_event(hdev, event);
                if (skb) {
                        hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
                                            HCI_SOCK_TRUSTED, NULL);
                        kfree_skb(skb);
                }
        }

        /* Send event to sockets */
        ev.event = event;
        ev.dev_id = hdev->id;
        hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

        if (event == HCI_DEV_UNREG) {
                struct sock *sk;

                /* Detach sockets from device */
                read_lock(&hci_sk_list.lock);
                sk_for_each(sk, &hci_sk_list.head) {
                        bh_lock_sock_nested(sk);
                        if (hci_pi(sk)->hdev == hdev) {
                                hci_pi(sk)->hdev = NULL;
                                sk->sk_err = EPIPE;
                                sk->sk_state = BT_OPEN;
                                sk->sk_state_change(sk);

                                hci_dev_put(hdev);
                        }
                        bh_unlock_sock(sk);
                }
                read_unlock(&hci_sk_list.lock);
        }
}

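/* Registry of HCI management channels: lookup helpers protected by
 * mgmt_chan_list_lock plus the exported register/unregister API.
 */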
static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
{
        struct hci_mgmt_chan *c;

        list_for_each_entry(c, &mgmt_chan_list, list) {
                if (c->channel == channel)
                        return c;
        }

        return NULL;
}

static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
        struct hci_mgmt_chan *c;

        mutex_lock(&mgmt_chan_list_lock);
        c = __hci_mgmt_chan_find(channel);
        mutex_unlock(&mgmt_chan_list_lock);

        return c;
}

int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
        if (c->channel < HCI_CHANNEL_CONTROL)
                return -EINVAL;

        mutex_lock(&mgmt_chan_list_lock);
        if (__hci_mgmt_chan_find(c->channel)) {
                mutex_unlock(&mgmt_chan_list_lock);
                return -EALREADY;
        }

        list_add_tail(&c->list, &mgmt_chan_list);

        mutex_unlock(&mgmt_chan_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);

void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
        mutex_lock(&mgmt_chan_list_lock);
        list_del(&c->list);
        mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);

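/* Release an HCI socket: drop monitor promiscuity, hand a user channel
 * device back to the management interface, detach from the device and
 * purge any queued packets.
 */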
static int hci_sock_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct hci_dev *hdev;

        BT_DBG("sock %p sk %p", sock, sk);

        if (!sk)
                return 0;

        hdev = hci_pi(sk)->hdev;

        if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
                atomic_dec(&monitor_promisc);

        bt_sock_unlink(&hci_sk_list, sk);

        if (hdev) {
                if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
                        mgmt_index_added(hdev);
                        hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
                        hci_dev_close(hdev->id);
                }

                atomic_dec(&hdev->promisc);
                hci_dev_put(hdev);
        }

        sock_orphan(sk);

        skb_queue_purge(&sk->sk_receive_queue);
        skb_queue_purge(&sk->sk_write_queue);

        sock_put(sk);
        return 0;
}

static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
        bdaddr_t bdaddr;
        int err;

        if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
                return -EFAULT;

        hci_dev_lock(hdev);

        err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

        hci_dev_unlock(hdev);

        return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
        bdaddr_t bdaddr;
        int err;

        if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
                return -EFAULT;

        hci_dev_lock(hdev);

        err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

        hci_dev_unlock(hdev);

        return err;
}

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
                                unsigned long arg)
{
        struct hci_dev *hdev = hci_pi(sk)->hdev;

        if (!hdev)
                return -EBADFD;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
                return -EBUSY;

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                return -EOPNOTSUPP;

        if (hdev->dev_type != HCI_BREDR)
                return -EOPNOTSUPP;

        switch (cmd) {
        case HCISETRAW:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return -EOPNOTSUPP;

        case HCIGETCONNINFO:
                return hci_get_conn_info(hdev, (void __user *) arg);

        case HCIGETAUTHINFO:
                return hci_get_auth_info(hdev, (void __user *) arg);

        case HCIBLOCKADDR:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_sock_blacklist_add(hdev, (void __user *) arg);

        case HCIUNBLOCKADDR:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_sock_blacklist_del(hdev, (void __user *) arg);
        }

        return -ENOIOCTLCMD;
}

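/* Ioctls on raw HCI sockets. Requests that do not need a bound device
 * are handled directly; everything else is passed to
 * hci_sock_bound_ioctl() with the socket lock held.
 */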
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
                          unsigned long arg)
{
        void __user *argp = (void __user *) arg;
        struct sock *sk = sock->sk;
        int err;

        BT_DBG("cmd %x arg %lx", cmd, arg);

        lock_sock(sk);

        if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
                err = -EBADFD;
                goto done;
        }

        release_sock(sk);

        switch (cmd) {
        case HCIGETDEVLIST:
                return hci_get_dev_list(argp);

        case HCIGETDEVINFO:
                return hci_get_dev_info(argp);

        case HCIGETCONNLIST:
                return hci_get_conn_list(argp);

        case HCIDEVUP:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_open(arg);

        case HCIDEVDOWN:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_close(arg);

        case HCIDEVRESET:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_reset(arg);

        case HCIDEVRESTAT:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_reset_stat(arg);

        case HCISETSCAN:
        case HCISETAUTH:
        case HCISETENCRYPT:
        case HCISETPTYPE:
        case HCISETLINKPOL:
        case HCISETLINKMODE:
        case HCISETACLMTU:
        case HCISETSCOMTU:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_cmd(cmd, argp);

        case HCIINQUIRY:
                return hci_inquiry(argp);
        }

        lock_sock(sk);

        err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
        release_sock(sk);
        return err;
}

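/* Bind a socket to an HCI channel (raw, user, monitor or one of the
 * registered management channels) and, where applicable, to a device.
 * The channel determines which capability checks and socket flags
 * apply.
 */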
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
                         int addr_len)
{
        struct sockaddr_hci haddr;
        struct sock *sk = sock->sk;
        struct hci_dev *hdev = NULL;
        int len, err = 0;

        BT_DBG("sock %p sk %p", sock, sk);

        if (!addr)
                return -EINVAL;

        memset(&haddr, 0, sizeof(haddr));
        len = min_t(unsigned int, sizeof(haddr), addr_len);
        memcpy(&haddr, addr, len);

        if (haddr.hci_family != AF_BLUETOOTH)
                return -EINVAL;

        lock_sock(sk);

        if (sk->sk_state == BT_BOUND) {
                err = -EALREADY;
                goto done;
        }

        switch (haddr.hci_channel) {
        case HCI_CHANNEL_RAW:
                if (hci_pi(sk)->hdev) {
                        err = -EALREADY;
                        goto done;
                }

                if (haddr.hci_dev != HCI_DEV_NONE) {
                        hdev = hci_dev_get(haddr.hci_dev);
                        if (!hdev) {
                                err = -ENODEV;
                                goto done;
                        }

                        atomic_inc(&hdev->promisc);
                }

                hci_pi(sk)->hdev = hdev;
                break;

        case HCI_CHANNEL_USER:
                if (hci_pi(sk)->hdev) {
                        err = -EALREADY;
                        goto done;
                }

                if (haddr.hci_dev == HCI_DEV_NONE) {
                        err = -EINVAL;
                        goto done;
                }

                if (!capable(CAP_NET_ADMIN)) {
                        err = -EPERM;
                        goto done;
                }

                hdev = hci_dev_get(haddr.hci_dev);
                if (!hdev) {
                        err = -ENODEV;
                        goto done;
                }

                if (test_bit(HCI_UP, &hdev->flags) ||
                    test_bit(HCI_INIT, &hdev->flags) ||
                    hci_dev_test_flag(hdev, HCI_SETUP) ||
                    hci_dev_test_flag(hdev, HCI_CONFIG)) {
                        err = -EBUSY;
                        hci_dev_put(hdev);
                        goto done;
                }

                if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
                        err = -EUSERS;
                        hci_dev_put(hdev);
                        goto done;
                }

                mgmt_index_removed(hdev);

                err = hci_dev_open(hdev->id);
                if (err) {
                        hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
                        mgmt_index_added(hdev);
                        hci_dev_put(hdev);
                        goto done;
                }

                atomic_inc(&hdev->promisc);

                hci_pi(sk)->hdev = hdev;
                break;

        case HCI_CHANNEL_MONITOR:
                if (haddr.hci_dev != HCI_DEV_NONE) {
                        err = -EINVAL;
                        goto done;
                }

                if (!capable(CAP_NET_RAW)) {
                        err = -EPERM;
                        goto done;
                }

                /* The monitor interface is restricted to CAP_NET_RAW
                 * capabilities and with that implicitly trusted.
                 */
                hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

                send_monitor_replay(sk);

                atomic_inc(&monitor_promisc);
                break;

        default:
                if (!hci_mgmt_chan_find(haddr.hci_channel)) {
                        err = -EINVAL;
                        goto done;
                }

                if (haddr.hci_dev != HCI_DEV_NONE) {
                        err = -EINVAL;
                        goto done;
                }

                /* Users with CAP_NET_ADMIN capabilities are allowed
                 * access to all management commands and events. For
                 * untrusted users the interface is restricted and
                 * also only untrusted events are sent.
                 */
                if (capable(CAP_NET_ADMIN))
                        hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

                /* At the moment the index and unconfigured index events
                 * are enabled unconditionally. Setting them on each
                 * socket when binding keeps this functionality. They
                 * however might be cleared later and then sending of these
                 * events will be disabled, but that is then intentional.
                 *
                 * This also enables generic events that are safe to be
                 * received by untrusted users. Example for such events
                 * are changes to settings, class of device, name etc.
                 */
                if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
                        hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
                        hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
                        hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS);
                }
                break;
        }

        hci_pi(sk)->channel = haddr.hci_channel;
        sk->sk_state = BT_BOUND;

done:
        release_sock(sk);
        return err;
}

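/* Report the bound device and channel of a socket; peer addresses are
 * not supported.
 */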
static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
                            int *addr_len, int peer)
{
        struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
        struct sock *sk = sock->sk;
        struct hci_dev *hdev;
        int err = 0;

        BT_DBG("sock %p sk %p", sock, sk);

        if (peer)
                return -EOPNOTSUPP;

        lock_sock(sk);

        hdev = hci_pi(sk)->hdev;
        if (!hdev) {
                err = -EBADFD;
                goto done;
        }

        *addr_len = sizeof(*haddr);
        haddr->hci_family = AF_BLUETOOTH;
        haddr->hci_dev = hdev->id;
        haddr->hci_channel = hci_pi(sk)->channel;

done:
        release_sock(sk);
        return err;
}

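/* Attach direction and timestamp ancillary data to a received message,
 * depending on the socket's cmsg mask.
 */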
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
                          struct sk_buff *skb)
{
        __u32 mask = hci_pi(sk)->cmsg_mask;

        if (mask & HCI_CMSG_DIR) {
                int incoming = bt_cb(skb)->incoming;
                put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
                         &incoming);
        }

        if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
                struct compat_timeval ctv;
#endif
                struct timeval tv;
                void *data;
                int len;

                skb_get_timestamp(skb, &tv);

                data = &tv;
                len = sizeof(tv);
#ifdef CONFIG_COMPAT
                if (!COMPAT_USE_64BIT_TIME &&
                    (msg->msg_flags & MSG_CMSG_COMPAT)) {
                        ctv.tv_sec = tv.tv_sec;
                        ctv.tv_usec = tv.tv_usec;
                        data = &ctv;
                        len = sizeof(ctv);
                }
#endif

                put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
        }
}

static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
                            int flags)
{
        int noblock = flags & MSG_DONTWAIT;
        struct sock *sk = sock->sk;
        struct sk_buff *skb;
        int copied, err;

        BT_DBG("sock %p, sk %p", sock, sk);

        if (flags & (MSG_OOB))
                return -EOPNOTSUPP;

        if (sk->sk_state == BT_CLOSED)
                return 0;

        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb)
                return err;

        copied = skb->len;
        if (len < copied) {
                msg->msg_flags |= MSG_TRUNC;
                copied = len;
        }

        skb_reset_transport_header(skb);
        err = skb_copy_datagram_msg(skb, 0, msg, copied);

        switch (hci_pi(sk)->channel) {
        case HCI_CHANNEL_RAW:
                hci_sock_cmsg(sk, msg, skb);
                break;
        case HCI_CHANNEL_USER:
        case HCI_CHANNEL_MONITOR:
                sock_recv_timestamp(msg, sk, skb);
                break;
        default:
                if (hci_mgmt_chan_find(hci_pi(sk)->channel))
                        sock_recv_timestamp(msg, sk, skb);
                break;
        }

        skb_free_datagram(sk, skb);

        return err ? : copied;
}

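/* Transmit path. Management channel writes are dispatched to the
 * registered channel handler; raw and user channel frames are queued
 * to the device after the appropriate permission and opcode checks.
 */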
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
                            size_t len)
{
        struct sock *sk = sock->sk;
        struct hci_mgmt_chan *chan;
        struct hci_dev *hdev;
        struct sk_buff *skb;
        int err;

        BT_DBG("sock %p sk %p", sock, sk);

        if (msg->msg_flags & MSG_OOB)
                return -EOPNOTSUPP;

        if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
                return -EINVAL;

        if (len < 4 || len > HCI_MAX_FRAME_SIZE)
                return -EINVAL;

        lock_sock(sk);

        switch (hci_pi(sk)->channel) {
        case HCI_CHANNEL_RAW:
        case HCI_CHANNEL_USER:
                break;
        case HCI_CHANNEL_MONITOR:
                err = -EOPNOTSUPP;
                goto done;
        default:
                mutex_lock(&mgmt_chan_list_lock);
                chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
                if (chan)
                        err = mgmt_control(chan, sk, msg, len);
                else
                        err = -EINVAL;

                mutex_unlock(&mgmt_chan_list_lock);
                goto done;
        }

        hdev = hci_pi(sk)->hdev;
        if (!hdev) {
                err = -EBADFD;
                goto done;
        }

        if (!test_bit(HCI_UP, &hdev->flags)) {
                err = -ENETDOWN;
                goto done;
        }

        skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
        if (!skb)
                goto done;

        if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
                err = -EFAULT;
                goto drop;
        }

        bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
        skb_pull(skb, 1);

        if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
                /* No permission check is needed for user channel
                 * since that gets enforced when binding the socket.
                 *
                 * However check that the packet type is valid.
                 */
                if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
                    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
                    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
                        err = -EINVAL;
                        goto drop;
                }

                skb_queue_tail(&hdev->raw_q, skb);
                queue_work(hdev->workqueue, &hdev->tx_work);
        } else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
                u16 opcode = get_unaligned_le16(skb->data);
                u16 ogf = hci_opcode_ogf(opcode);
                u16 ocf = hci_opcode_ocf(opcode);

                if (((ogf > HCI_SFLT_MAX_OGF) ||
                     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
                                   &hci_sec_filter.ocf_mask[ogf])) &&
                    !capable(CAP_NET_RAW)) {
                        err = -EPERM;
                        goto drop;
                }

                if (ogf == 0x3f) {
                        skb_queue_tail(&hdev->raw_q, skb);
                        queue_work(hdev->workqueue, &hdev->tx_work);
                } else {
                        /* Stand-alone HCI commands must be flagged as
                         * single-command requests.
                         */
                        bt_cb(skb)->req_start = 1;

                        skb_queue_tail(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
                }
        } else {
                if (!capable(CAP_NET_RAW)) {
                        err = -EPERM;
                        goto drop;
                }

                skb_queue_tail(&hdev->raw_q, skb);
                queue_work(hdev->workqueue, &hdev->tx_work);
        }

        err = len;

done:
        release_sock(sk);
        return err;

drop:
        kfree_skb(skb);
        goto done;
}

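/* Socket options are only defined for the raw channel: data direction
 * and timestamp cmsgs plus the HCI packet filter.
 */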
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
                               char __user *optval, unsigned int len)
{
        struct hci_ufilter uf = { .opcode = 0 };
        struct sock *sk = sock->sk;
        int err = 0, opt = 0;

        BT_DBG("sk %p, opt %d", sk, optname);

        lock_sock(sk);

        if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
                err = -EBADFD;
                goto done;
        }

        switch (optname) {
        case HCI_DATA_DIR:
                if (get_user(opt, (int __user *)optval)) {
                        err = -EFAULT;
                        break;
                }

                if (opt)
                        hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
                else
                        hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
                break;

        case HCI_TIME_STAMP:
                if (get_user(opt, (int __user *)optval)) {
                        err = -EFAULT;
                        break;
                }

                if (opt)
                        hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
                else
                        hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
                break;

        case HCI_FILTER:
                {
                        struct hci_filter *f = &hci_pi(sk)->filter;

                        uf.type_mask = f->type_mask;
                        uf.opcode = f->opcode;
                        uf.event_mask[0] = *((u32 *) f->event_mask + 0);
                        uf.event_mask[1] = *((u32 *) f->event_mask + 1);
                }

                len = min_t(unsigned int, len, sizeof(uf));
                if (copy_from_user(&uf, optval, len)) {
                        err = -EFAULT;
                        break;
                }

                if (!capable(CAP_NET_RAW)) {
                        uf.type_mask &= hci_sec_filter.type_mask;
                        uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
                        uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
                }

                {
                        struct hci_filter *f = &hci_pi(sk)->filter;

                        f->type_mask = uf.type_mask;
                        f->opcode = uf.opcode;
                        *((u32 *) f->event_mask + 0) = uf.event_mask[0];
                        *((u32 *) f->event_mask + 1) = uf.event_mask[1];
                }
                break;

        default:
                err = -ENOPROTOOPT;
                break;
        }

done:
        release_sock(sk);
        return err;
}

static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
                               char __user *optval, int __user *optlen)
{
        struct hci_ufilter uf;
        struct sock *sk = sock->sk;
        int len, opt, err = 0;

        BT_DBG("sk %p, opt %d", sk, optname);

        if (get_user(len, optlen))
                return -EFAULT;

        lock_sock(sk);

        if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
                err = -EBADFD;
                goto done;
        }

        switch (optname) {
        case HCI_DATA_DIR:
                if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
                        opt = 1;
                else
                        opt = 0;

                if (put_user(opt, optval))
                        err = -EFAULT;
                break;

        case HCI_TIME_STAMP:
                if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
                        opt = 1;
                else
                        opt = 0;

                if (put_user(opt, optval))
                        err = -EFAULT;
                break;

        case HCI_FILTER:
                {
                        struct hci_filter *f = &hci_pi(sk)->filter;

                        memset(&uf, 0, sizeof(uf));
                        uf.type_mask = f->type_mask;
                        uf.opcode = f->opcode;
                        uf.event_mask[0] = *((u32 *) f->event_mask + 0);
                        uf.event_mask[1] = *((u32 *) f->event_mask + 1);
                }

                len = min_t(unsigned int, len, sizeof(uf));
                if (copy_to_user(optval, &uf, len))
                        err = -EFAULT;
                break;

        default:
                err = -ENOPROTOOPT;
                break;
        }

done:
        release_sock(sk);
        return err;
}

static const struct proto_ops hci_sock_ops = {
        .family         = PF_BLUETOOTH,
        .owner          = THIS_MODULE,
        .release        = hci_sock_release,
        .bind           = hci_sock_bind,
        .getname        = hci_sock_getname,
        .sendmsg        = hci_sock_sendmsg,
        .recvmsg        = hci_sock_recvmsg,
        .ioctl          = hci_sock_ioctl,
        .poll           = datagram_poll,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
        .setsockopt     = hci_sock_setsockopt,
        .getsockopt     = hci_sock_getsockopt,
        .connect        = sock_no_connect,
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .mmap           = sock_no_mmap
};

static struct proto hci_sk_proto = {
        .name           = "HCI",
        .owner          = THIS_MODULE,
        .obj_size       = sizeof(struct hci_pinfo)
};

static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
                           int kern)
{
        struct sock *sk;

        BT_DBG("sock %p", sock);

        if (sock->type != SOCK_RAW)
                return -ESOCKTNOSUPPORT;

        sock->ops = &hci_sock_ops;

        sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
        if (!sk)
                return -ENOMEM;

        sock_init_data(sock, sk);

        sock_reset_flag(sk, SOCK_ZAPPED);

        sk->sk_protocol = protocol;

        sock->state = SS_UNCONNECTED;
        sk->sk_state = BT_OPEN;

        bt_sock_link(&hci_sk_list, sk);
        return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
        .family = PF_BLUETOOTH,
        .owner  = THIS_MODULE,
        .create = hci_sock_create,
};

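/* Register the HCI protocol, the PF_BLUETOOTH/BTPROTO_HCI socket family
 * and the /proc entry; hci_sock_cleanup() undoes this in reverse order.
 */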
int __init hci_sock_init(void)
{
        int err;

        BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

        err = proto_register(&hci_sk_proto, 0);
        if (err < 0)
                return err;

        err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
        if (err < 0) {
                BT_ERR("HCI socket registration failed");
                goto error;
        }

        err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
        if (err < 0) {
                BT_ERR("Failed to create HCI proc file");
                bt_sock_unregister(BTPROTO_HCI);
                goto error;
        }

        BT_INFO("HCI socket layer initialized");

        return 0;

error:
        proto_unregister(&hci_sk_proto);
        return err;
}

void hci_sock_cleanup(void)
{
        bt_procfs_cleanup(&init_net, "hci");
        bt_sock_unregister(BTPROTO_HCI);
        proto_unregister(&hci_sk_proto);
}