Bluetooth: btbcm: Read the local name in setup stage
[deliverable/linux.git] / net / bluetooth / hci_sock.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
8e87d142
YH
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
22 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI sockets. */
26
8c520a59 27#include <linux/export.h>
1da177e4
LT
28#include <asm/unaligned.h>
29
30#include <net/bluetooth/bluetooth.h>
31#include <net/bluetooth/hci_core.h>
cd82e61c 32#include <net/bluetooth/hci_mon.h>
fa4335d7
JH
33#include <net/bluetooth/mgmt.h>
34
35#include "mgmt_util.h"
1da177e4 36
801c1e8d
JH
37static LIST_HEAD(mgmt_chan_list);
38static DEFINE_MUTEX(mgmt_chan_list_lock);
39
cd82e61c
MH
40static atomic_t monitor_promisc = ATOMIC_INIT(0);
41
1da177e4
LT
42/* ----- HCI socket interface ----- */
43
863def58
MH
/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

/* Per-socket state for an HCI socket. The bt_sock member must stay
 * first so that hci_pi() can cast a struct sock pointer directly.
 */
struct hci_pinfo {
	struct bt_sock bt;		/* common Bluetooth socket base */
	struct hci_dev *hdev;		/* bound controller, or NULL */
	struct hci_filter filter;	/* per-socket RAW-channel packet filter */
	__u32 cmsg_mask;		/* HCI_CMSG_* ancillary-data options */
	unsigned short channel;		/* HCI_CHANNEL_* this socket is bound to */
	unsigned long flags;		/* HCI_SOCK_* bits, see hci_sock_set_flag() */
};
55
6befc644
MH
/* Atomically set socket flag bit @nr in hci_pi(sk)->flags. */
void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}
60
/* Atomically clear socket flag bit @nr in hci_pi(sk)->flags. */
void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}
65
c85be545
MH
/* Test socket flag bit @nr; returns non-zero when set. */
int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}
70
d0f172b1
JH
/* Return the HCI_CHANNEL_* number this socket was bound to. */
unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}
75
9391976a 76static inline int hci_test_bit(int nr, const void *addr)
1da177e4 77{
9391976a 78 return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
1da177e4
LT
79}
80
/* Security filter */
#define HCI_SFLT_MAX_OGF 5

/* Bitmaps describing which packet types, events and commands an
 * unprivileged (no CAP_NET_RAW) RAW socket may send or receive.
 */
struct hci_sec_filter {
	__u32 type_mask;	/* allowed packet types */
	__u32 event_mask[2];	/* allowed HCI events (one bit per event) */
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];	/* allowed OCFs per OGF */
};

static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
110
/* Global list of all open HCI sockets, protected by its rwlock. */
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
114
f81fe64f
MH
/* Return true when @skb must NOT be delivered to @sk according to the
 * socket's HCI filter (type mask, event mask and optional opcode match).
 */
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	/* Vendor packets are mapped onto filter type bit 0 */
	if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
		flt_type = 0;
	else
		flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
		return false;

	/* First byte of an event packet is the event code */
	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	/* Opcode sits at offset 3 in Command Complete events... */
	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	/* ...and at offset 4 in Command Status events */
	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}
154
/* Send frame to RAW socket.
 *
 * Delivers a copy of @skb (with the packet-type byte pushed in front)
 * to every bound RAW or USER channel socket attached to @hdev, except
 * the socket the frame originated from. The private copy is created
 * lazily, only once a matching socket is found.
 */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* User channel only sees incoming event/data traffic */
			if (!bt_cb(skb)->incoming)
				continue;
			if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}
212
7129069e
JH
/* Send frame to sockets with specific channel.
 *
 * Clones @skb to every bound socket on @channel that has socket flag
 * @flag set, skipping @skip_sk (may be NULL).
 */
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}
250
cd82e61c
MH
/* Send frame to monitor socket.
 *
 * Wraps @skb in a hci_mon_hdr (opcode derived from packet type and
 * direction) and broadcasts it on the monitor channel. Cheap early-out
 * when no monitor socket is in promiscuous mode.
 */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	default:
		/* Unknown packet types are not forwarded to the monitor */
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
301
cd82e61c
MH
/* Build a monitor-channel control event skb for a device lifecycle
 * @event (register/unregister/open/close). Returns NULL on allocation
 * failure or for events that have no monitor representation. Caller
 * owns the returned skb.
 */
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		/* New Index carries a payload describing the controller */
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
361
/* Replay the current controller state to a freshly bound monitor
 * socket: one New Index event per registered device, plus an Open
 * Index event for each device that is currently running.
 */
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_dev_list_lock);
}
391
040030ef
MH
/* Generate internal stack event.
 *
 * Synthesizes an HCI_EV_STACK_INTERNAL event packet carrying @dlen
 * bytes of @data and delivers it to bound sockets via
 * hci_send_to_sock(). @hdev may be NULL for stack-wide events.
 */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	/* Mark as incoming so it passes channel delivery checks */
	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
418
/* Notify sockets about a device lifecycle @event: forward it to the
 * monitor channel, generate an internal stack event for up/down, and
 * on unregister detach every socket still bound to @hdev.
 */
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				/* Drop the reference taken at bind time */
				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}
464
801c1e8d
JH
465static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
466{
467 struct hci_mgmt_chan *c;
468
469 list_for_each_entry(c, &mgmt_chan_list, list) {
470 if (c->channel == channel)
471 return c;
472 }
473
474 return NULL;
475}
476
477static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
478{
479 struct hci_mgmt_chan *c;
480
481 mutex_lock(&mgmt_chan_list_lock);
482 c = __hci_mgmt_chan_find(channel);
483 mutex_unlock(&mgmt_chan_list_lock);
484
485 return c;
486}
487
/* Register a management channel handler.
 *
 * Returns 0 on success, -EINVAL for channel numbers below
 * HCI_CHANNEL_CONTROL, or -EALREADY when the channel number is taken.
 */
int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
	if (c->channel < HCI_CHANNEL_CONTROL)
		return -EINVAL;

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);
		return -EALREADY;
	}

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);
506
/* Unregister a previously registered management channel handler. */
void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);
514
1da177e4
LT
/* Release an HCI socket: drop monitor promiscuity, unlink from the
 * global socket list, give back user-channel exclusivity, release the
 * device reference and free queued skbs. Always returns 0.
 */
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* When releasing an user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}
560
/* HCIBLOCKADDR ioctl helper: copy a bdaddr from userspace and add it
 * to the device blacklist as a BR/EDR address. Returns 0 or a negative
 * errno (-EFAULT on bad user pointer, or hci_bdaddr_list_add() error).
 */
static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}
577
/* HCIUNBLOCKADDR ioctl helper: copy a bdaddr from userspace and remove
 * it from the device blacklist. Returns 0 or a negative errno.
 */
static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}
594
/* Ioctls that require bound socket.
 *
 * Called with the socket lock held (see hci_sock_ioctl). Rejects
 * sockets whose device is in user-channel or unconfigured state, and
 * non-BR/EDR devices. Returns -ENOIOCTLCMD for unknown commands.
 */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_BREDR)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		/* Raw mode is no longer supported, but keep the
		 * capability check for consistent error reporting.
		 */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);
	}

	return -ENOIOCTLCMD;
}
638
8fc9ced3
GP
/* Top-level ioctl handler for HCI sockets.
 *
 * Only RAW channel sockets may issue ioctls. Device-independent
 * commands are dispatched with the socket lock dropped (they take
 * their own locks); anything unrecognized falls through to
 * hci_sock_bound_ioctl() with the lock re-acquired.
 */
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *) arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* Drop the lock for the device-independent commands below */
	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	/* Remaining commands need the bound device; retake the lock */
	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
711
8fc9ced3
GP
/* Bind an HCI socket to a channel (and optionally a device).
 *
 * RAW: optional device, bumps promisc. USER: exclusive controller
 * access (CAP_NET_ADMIN), hides the index from mgmt and powers the
 * device on. MONITOR: CAP_NET_RAW, replays current device state.
 * Other channels must be registered mgmt channels bound to
 * HCI_DEV_NONE. Returns 0 or a negative errno.
 */
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	/* Accept short sockaddrs; unset fields read as zero */
	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		/* Refuse exclusive access while the device is being set
		 * up, configured, or already in use (unless it is only
		 * up because of the auto-off grace period).
		 */
		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening an user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		atomic_inc(&hdev->promisc);

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Example for such events
		 * are changes to settings, class of device, name etc.
		 */
		if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS);
		}
		break;
	}


	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
889
8fc9ced3
GP
/* Report the local address of a bound HCI socket (device id and
 * channel). Peer addresses are not supported; an unbound socket
 * yields -EBADFD.
 */
static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int *addr_len, int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	*addr_len = sizeof(*haddr);
	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;

done:
	release_sock(sk);
	return err;
}
920
6039aa73
GP
/* Attach ancillary data (packet direction and/or timestamp) to an
 * outgoing recvmsg, according to the socket's cmsg_mask. Compat tasks
 * receive a 32-bit timeval unless COMPAT_USE_64BIT_TIME is in effect.
 */
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
8e87d142 957
1b784140
YX
/* recvmsg handler: dequeue one datagram, copy it to userspace
 * (truncating with MSG_TRUNC if @len is short) and add per-channel
 * ancillary data. Returns bytes copied or a negative errno.
 */
static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
			    int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		/* Registered mgmt channels also get timestamps */
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}
1005
fa4335d7
JH
/* Parse and dispatch one management command from @msg on channel
 * @chan: validate the mgmt_hdr, look up the opcode handler, enforce
 * trust/index/length constraints, then invoke the handler. Returns
 * @msglen on success; errors are reported either as a negative return
 * or as an mgmt command-status event to @sk.
 */
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* Header length field must match the actual payload size */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	/* Untrusted sockets may only call handlers marked untrusted */
	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Devices in setup/config or claimed by a user channel
		 * are not addressable through mgmt.
		 */
		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	/* Handler's hdev expectation must match what was addressed */
	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
1116
1b784140
YX
/* sendmsg handler.
 *
 * RAW/USER channels queue the frame to the device (with security
 * filtering for RAW commands); mgmt channels dispatch through
 * hci_mgmt_cmd(); the monitor channel is read-only. The first byte of
 * the payload is the HCI packet type. Returns bytes consumed or a
 * negative errno.
 */
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	/* Minimum is type byte plus a 3-byte command/event header */
	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	default:
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	/* Strip the leading packet-type byte into the control block */
	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
		    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		/* Commands outside the security filter whitelist need
		 * CAP_NET_RAW.
		 */
		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Vendor-specific commands (OGF 0x3f) bypass the
		 * command queue and go straight to the transport.
		 */
		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->req.start = true;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		/* Raw ACL/SCO injection requires CAP_NET_RAW */
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
1241
8fc9ced3
GP
/* Set socket options for an HCI socket.
 *
 * Only sockets bound to HCI_CHANNEL_RAW may change these options;
 * any other channel gets -EBADFD.  Supported options:
 *   HCI_DATA_DIR   - toggle direction info in received cmsgs
 *   HCI_TIME_STAMP - toggle timestamps in received cmsgs
 *   HCI_FILTER     - install a packet-type/event/opcode filter
 *
 * Returns 0 on success or a negative errno.
 */
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	/* Designated initializer zero-fills the whole structure, so any
	 * fields not overwritten below start out as zero.
	 */
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			/* Seed uf with the socket's current filter so a
			 * short copy_from_user() below leaves the fields
			 * userspace did not supply at their current values.
			 */
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		/* Unprivileged sockets may only enable what the security
		 * filter permits; mask the request against hci_sec_filter.
		 */
		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			/* Commit the (possibly masked) filter to the socket. */
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
1324
8fc9ced3
GP
1325static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1326 char __user *optval, int __user *optlen)
1da177e4
LT
1327{
1328 struct hci_ufilter uf;
1329 struct sock *sk = sock->sk;
cedc5469
MH
1330 int len, opt, err = 0;
1331
1332 BT_DBG("sk %p, opt %d", sk, optname);
1da177e4
LT
1333
1334 if (get_user(len, optlen))
1335 return -EFAULT;
1336
cedc5469
MH
1337 lock_sock(sk);
1338
1339 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
c2371e80 1340 err = -EBADFD;
cedc5469
MH
1341 goto done;
1342 }
1343
1da177e4
LT
1344 switch (optname) {
1345 case HCI_DATA_DIR:
1346 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1347 opt = 1;
8e87d142 1348 else
1da177e4
LT
1349 opt = 0;
1350
1351 if (put_user(opt, optval))
cedc5469 1352 err = -EFAULT;
1da177e4
LT
1353 break;
1354
1355 case HCI_TIME_STAMP:
1356 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1357 opt = 1;
8e87d142 1358 else
1da177e4
LT
1359 opt = 0;
1360
1361 if (put_user(opt, optval))
cedc5469 1362 err = -EFAULT;
1da177e4
LT
1363 break;
1364
1365 case HCI_FILTER:
1366 {
1367 struct hci_filter *f = &hci_pi(sk)->filter;
1368
e15ca9a0 1369 memset(&uf, 0, sizeof(uf));
1da177e4
LT
1370 uf.type_mask = f->type_mask;
1371 uf.opcode = f->opcode;
1372 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1373 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1374 }
1375
1376 len = min_t(unsigned int, len, sizeof(uf));
1377 if (copy_to_user(optval, &uf, len))
cedc5469 1378 err = -EFAULT;
1da177e4
LT
1379 break;
1380
1381 default:
cedc5469 1382 err = -ENOPROTOOPT;
1da177e4
LT
1383 break;
1384 }
1385
cedc5469
MH
1386done:
1387 release_sock(sk);
1388 return err;
1da177e4
LT
1389}
1390
/* Operations table for raw HCI sockets.  HCI sockets are datagram-like:
 * listen/shutdown/connect/socketpair/accept/mmap are all rejected via
 * the sock_no_* stubs, and poll is handled by the generic datagram_poll.
 */
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};
1410
/* Protocol descriptor for HCI sockets; obj_size makes sk_alloc() reserve
 * room for the per-socket struct hci_pinfo.
 */
static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
1416
3f378b68
EP
1417static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1418 int kern)
1da177e4
LT
1419{
1420 struct sock *sk;
1421
1422 BT_DBG("sock %p", sock);
1423
1424 if (sock->type != SOCK_RAW)
1425 return -ESOCKTNOSUPPORT;
1426
1427 sock->ops = &hci_sock_ops;
1428
11aa9c28 1429 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
1da177e4
LT
1430 if (!sk)
1431 return -ENOMEM;
1432
1433 sock_init_data(sock, sk);
1434
1435 sock_reset_flag(sk, SOCK_ZAPPED);
1436
1437 sk->sk_protocol = protocol;
1438
1439 sock->state = SS_UNCONNECTED;
1440 sk->sk_state = BT_OPEN;
1441
1442 bt_sock_link(&hci_sk_list, sk);
1443 return 0;
1444}
1445
/* Family descriptor registered with the Bluetooth socket layer for
 * BTPROTO_HCI; hci_sock_create() instantiates each new socket.
 */
static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};
1451
1da177e4
LT
1452int __init hci_sock_init(void)
1453{
1454 int err;
1455
b0a8e282
MH
1456 BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
1457
1da177e4
LT
1458 err = proto_register(&hci_sk_proto, 0);
1459 if (err < 0)
1460 return err;
1461
1462 err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
f7c86637
MY
1463 if (err < 0) {
1464 BT_ERR("HCI socket registration failed");
1da177e4 1465 goto error;
f7c86637
MY
1466 }
1467
b0316615 1468 err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
f7c86637
MY
1469 if (err < 0) {
1470 BT_ERR("Failed to create HCI proc file");
1471 bt_sock_unregister(BTPROTO_HCI);
1472 goto error;
1473 }
1da177e4 1474
1da177e4
LT
1475 BT_INFO("HCI socket layer initialized");
1476
1477 return 0;
1478
1479error:
1da177e4
LT
1480 proto_unregister(&hci_sk_proto);
1481 return err;
1482}
1483
/* Tear down the HCI socket layer; exact reverse of hci_sock_init(). */
void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}
This page took 1.073342 seconds and 5 git commands to generate.