Bluetooth: Send index information updates to monitor channel
[deliverable/linux.git] / net / bluetooth / hci_sock.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
8e87d142
YH
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
22 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI sockets. */
26
8c520a59 27#include <linux/export.h>
1da177e4
LT
28#include <asm/unaligned.h>
29
30#include <net/bluetooth/bluetooth.h>
31#include <net/bluetooth/hci_core.h>
cd82e61c 32#include <net/bluetooth/hci_mon.h>
fa4335d7
JH
33#include <net/bluetooth/mgmt.h>
34
35#include "mgmt_util.h"
1da177e4 36
801c1e8d
JH
37static LIST_HEAD(mgmt_chan_list);
38static DEFINE_MUTEX(mgmt_chan_list_lock);
39
cd82e61c
MH
40static atomic_t monitor_promisc = ATOMIC_INIT(0);
41
1da177e4
LT
42/* ----- HCI socket interface ----- */
43
863def58
MH
44/* Socket info */
45#define hci_pi(sk) ((struct hci_pinfo *) sk)
46
47struct hci_pinfo {
48 struct bt_sock bt;
49 struct hci_dev *hdev;
50 struct hci_filter filter;
51 __u32 cmsg_mask;
52 unsigned short channel;
6befc644 53 unsigned long flags;
863def58
MH
54};
55
6befc644
MH
/* Set a per-socket flag bit (HCI_SOCK_* / HCI_MGMT_*) atomically. */
void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}
60
/* Clear a per-socket flag bit atomically. */
void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}
65
c85be545
MH
/* Test a per-socket flag bit; returns non-zero when the bit is set. */
int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}
70
d0f172b1
JH
/* Return the HCI channel (HCI_CHANNEL_*) this socket was bound to. */
unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}
75
9391976a 76static inline int hci_test_bit(int nr, const void *addr)
1da177e4 77{
9391976a 78 return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
1da177e4
LT
79}
80
81/* Security filter */
3ad254f7
MH
82#define HCI_SFLT_MAX_OGF 5
83
84struct hci_sec_filter {
85 __u32 type_mask;
86 __u32 event_mask[2];
87 __u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
88};
89
7e67c112 90static const struct hci_sec_filter hci_sec_filter = {
1da177e4
LT
91 /* Packet types */
92 0x10,
93 /* Events */
dd7f5527 94 { 0x1000d9fe, 0x0000b00c },
1da177e4
LT
95 /* Commands */
96 {
97 { 0x0 },
98 /* OGF_LINK_CTL */
7c631a67 99 { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
1da177e4 100 /* OGF_LINK_POLICY */
7c631a67 101 { 0x00005200, 0x00000000, 0x00000000, 0x00 },
1da177e4 102 /* OGF_HOST_CTL */
7c631a67 103 { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
1da177e4 104 /* OGF_INFO_PARAM */
7c631a67 105 { 0x000002be, 0x00000000, 0x00000000, 0x00 },
1da177e4 106 /* OGF_STATUS_PARAM */
7c631a67 107 { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
1da177e4
LT
108 }
109};
110
111static struct bt_sock_list hci_sk_list = {
d5fb2962 112 .lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
1da177e4
LT
113};
114
f81fe64f
MH
/* Decide whether @skb must be withheld from raw socket @sk.
 *
 * Returns true when the socket's filter rejects the packet, false when
 * the packet should be delivered.  Vendor packets are mapped to filter
 * type 0; event packets are additionally matched against the event mask
 * and, for Command Complete/Status events, against the opcode filter.
 */
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
		flt_type = 0;
	else
		flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;

	/* Packet type not enabled in the type mask -> filtered out */
	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
		return false;

	/* First data byte of an event packet is the event code */
	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	/* Opcode sits at offset 3 (Cmd Complete) resp. 4 (Cmd Status)
	 * of the event payload; may be unaligned.
	 */
	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}
154
/* Send frame to RAW socket.
 *
 * Deliver @skb to every bound RAW or USER channel socket attached to
 * @hdev, except the socket the frame originated from.  A single private
 * copy with the packet-type byte pushed in front is created lazily and
 * then cloned per receiver.
 */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* User channel only sees incoming event/ACL/SCO */
			if (!bt_cb(skb)->incoming)
				continue;
			if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	/* kfree_skb(NULL) is a no-op when no receiver matched */
	kfree_skb(skb_copy);
}
212
7129069e
JH
/* Send frame to sockets with specific channel.
 *
 * Clone @skb to every bound socket on @channel that has per-socket
 * flag @flag set, skipping @skip_sk (may be NULL).  The caller keeps
 * ownership of @skb.
 */
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}
250
cd82e61c
MH
/* Send frame to monitor socket.
 *
 * Wrap the HCI frame in a hci_mon_hdr (opcode derived from packet type
 * and direction) and broadcast it to trusted HCI_CHANNEL_MONITOR
 * sockets.  Cheap early-out when no monitor is in promiscuous mode.
 */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	default:
		/* Unknown packet types are not forwarded to monitors */
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
301
cd82e61c
MH
/* Build a monitor-channel notification skb for a device state change.
 *
 * Maps HCI_DEV_REG/UNREG/UP/OPEN/CLOSE to the corresponding
 * HCI_MON_* opcode, fills the event-specific payload and prepends the
 * hci_mon_hdr.  Returns NULL on allocation failure or unknown @event;
 * the caller owns (and must free) the returned skb.
 */
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *)skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		/* NOTE(review): copies a fixed 8 bytes of hdev->name;
		 * assumes the name buffer holds at least 8 bytes.
		 */
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = (void *)skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	/* Common monitor header: opcode, controller index, payload len */
	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
374
/* Replay the current controller state to a newly bound monitor socket.
 *
 * For every registered device queue a NEW_INDEX event, then — depending
 * on HCI_RUNNING and HCI_UP — OPEN_INDEX and INDEX_INFO events, so the
 * monitor starts with a complete picture of all controllers.
 */
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		/* Only opened devices get an OPEN_INDEX replay */
		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		/* Only powered-up devices get an INDEX_INFO replay */
		if (!test_bit(HCI_UP, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_UP);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_dev_list_lock);
}
414
040030ef
MH
/* Generate internal stack event.
 *
 * Builds a synthetic HCI_EV_STACK_INTERNAL event carrying @dlen bytes
 * of @data and delivers it to raw sockets via hci_send_to_sock().
 */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	/* Mark as incoming so RAW/USER delivery rules treat it like an
	 * event received from the controller.
	 */
	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
441
/* Propagate a device state change (@event, HCI_DEV_*) to user space.
 *
 * Forwards the event to monitor sockets, raises a stack-internal event
 * for RAW sockets (for events up to HCI_DEV_DOWN), and on HCI_DEV_UNREG
 * detaches every socket still bound to @hdev.
 */
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				/* Wake readers with EPIPE and drop the
				 * reference the bind took on hdev.
				 */
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}
487
801c1e8d
JH
488static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
489{
490 struct hci_mgmt_chan *c;
491
492 list_for_each_entry(c, &mgmt_chan_list, list) {
493 if (c->channel == channel)
494 return c;
495 }
496
497 return NULL;
498}
499
500static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
501{
502 struct hci_mgmt_chan *c;
503
504 mutex_lock(&mgmt_chan_list_lock);
505 c = __hci_mgmt_chan_find(channel);
506 mutex_unlock(&mgmt_chan_list_lock);
507
508 return c;
509}
510
/* Register a management channel handler.
 *
 * Channel numbers below HCI_CHANNEL_CONTROL are reserved and rejected
 * with -EINVAL; duplicate registrations return -EALREADY.
 */
int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
	if (c->channel < HCI_CHANNEL_CONTROL)
		return -EINVAL;

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);
		return -EALREADY;
	}

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);
529
/* Unregister a previously registered management channel handler. */
void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);
537
1da177e4
LT
/* Release an HCI socket: undo promiscuous counts, give back user-channel
 * exclusive access, drop the device reference and purge queued skbs.
 */
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* When releasing an user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}
583
/* HCIBLOCKADDR ioctl backend: copy a bdaddr from user space and add it
 * to the device blacklist (BR/EDR address type).
 */
static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}
600
/* HCIUNBLOCKADDR ioctl backend: copy a bdaddr from user space and
 * remove it from the device blacklist (BR/EDR address type).
 */
static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}
617
/* Ioctls that require bound socket.
 *
 * Rejects the request when no device is attached (-EBADFD), the device
 * is held by a user channel (-EBUSY), is unconfigured or not a BR/EDR
 * controller (-EOPNOTSUPP).  Unknown commands fall through to
 * -ENOIOCTLCMD so the caller can continue dispatching.
 */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_BREDR)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		/* Raw mode is no longer supported; keep the permission
		 * check so unprivileged callers still see -EPERM.
		 */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);
	}

	return -ENOIOCTLCMD;
}
661
8fc9ced3
GP
/* Top-level ioctl handler for HCI sockets.
 *
 * Only RAW channel sockets may issue ioctls.  Device-independent
 * commands are handled with the socket lock released; everything else
 * is re-locked and forwarded to hci_sock_bound_ioctl().
 */
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *) arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* The commands below don't touch socket state, so drop the lock
	 * before potentially long-running device operations.
	 */
	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	/* Remaining commands need the bound device; re-take the lock */
	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
734
8fc9ced3
GP
/* Bind an HCI socket to a channel (and, for RAW/USER, to a device).
 *
 * RAW: optional device, bumps hdev->promisc.
 * USER: exclusive access — requires CAP_NET_ADMIN, an idle device, and
 *       flips HCI_USER_CHANNEL before opening the transport.
 * MONITOR: CAP_NET_RAW, no device; replays current index state.
 * Other channels must be registered management channels.
 */
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	/* Accept short sockaddrs; unset fields read as zero */
	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* User channel needs a concrete device */
		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		/* Refuse devices that are initializing, being set up or
		 * already in active use (unless only the auto-off grace
		 * period keeps them up).
		 */
		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		/* Hide the index from mgmt while user space owns it */
		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening an user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		atomic_inc(&hdev->promisc);

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Example for such events
		 * are changes to settings, class of device, name etc.
		 */
		if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS);
		}
		break;
	}

	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
912
8fc9ced3
GP
/* Report the bound device id and channel of an HCI socket.
 *
 * Peer addresses are meaningless for HCI sockets (-EOPNOTSUPP); a
 * socket without an attached device yields -EBADFD.
 */
static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int *addr_len, int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	*addr_len = sizeof(*haddr);
	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;

done:
	release_sock(sk);
	return err;
}
943
6039aa73
GP
/* Attach requested ancillary data (direction, timestamp) to a received
 * message, honouring the socket's cmsg_mask.  The timestamp is down-
 * converted for 32-bit compat callers when needed.
 */
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		/* 32-bit tasks expect a compat_timeval layout */
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
8e87d142 980
1b784140
YX
/* Receive one queued frame from an HCI socket.
 *
 * Truncates to @len (setting MSG_TRUNC), then adds channel-appropriate
 * ancillary data: HCI cmsgs for RAW, plain timestamps for USER/MONITOR
 * and registered management channels.
 */
static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
			    int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	/* A closed socket reads as EOF */
	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}
1028
fa4335d7
JH
/* Parse and dispatch one management command from @msg on channel @chan.
 *
 * Validates the mgmt_hdr, the declared length, the opcode against the
 * channel's handler table, the caller's trust level, and the target
 * index state before invoking the handler.  Returns @msglen on success
 * or a negative errno; protocol-level failures are reported back to the
 * socket via mgmt_cmd_status() and still return that status call's
 * result.
 */
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* Declared payload length must match what was actually sent */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	/* Untrusted sockets may only run handlers explicitly marked so */
	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Devices in setup/config or owned by a user channel are
		 * not addressable through mgmt.
		 */
		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	/* Handler's hdev expectation must match what the caller gave */
	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
1139
1b784140
YX
/* Send one frame (or management command) on an HCI socket.
 *
 * RAW/USER frames carry a leading packet-type byte which is stripped
 * into the skb control block.  USER channel frames bypass the security
 * filter; RAW commands are checked against hci_sec_filter unless the
 * caller has CAP_NET_RAW.  Management channels dispatch via
 * hci_mgmt_cmd(); MONITOR is write-protected.
 */
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	/* Minimum: type byte + smallest HCI header */
	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		/* Monitor channel is read-only */
		err = -EOPNOTSUPP;
		goto done;
	default:
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	/* First byte is the packet type; strip it off the wire data */
	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
		    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		/* Commands outside the security filter need CAP_NET_RAW */
		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (ogf == 0x3f) {
			/* Vendor commands go out raw, uncontrolled by
			 * the command queue.
			 */
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->req.start = true;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
1264
8fc9ced3
GP
1265static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
1266 char __user *optval, unsigned int len)
1da177e4
LT
1267{
1268 struct hci_ufilter uf = { .opcode = 0 };
1269 struct sock *sk = sock->sk;
1270 int err = 0, opt = 0;
1271
1272 BT_DBG("sk %p, opt %d", sk, optname);
1273
1274 lock_sock(sk);
1275
2f39cdb7 1276 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
c2371e80 1277 err = -EBADFD;
2f39cdb7
MH
1278 goto done;
1279 }
1280
1da177e4
LT
1281 switch (optname) {
1282 case HCI_DATA_DIR:
1283 if (get_user(opt, (int __user *)optval)) {
1284 err = -EFAULT;
1285 break;
1286 }
1287
1288 if (opt)
1289 hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1290 else
1291 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1292 break;
1293
1294 case HCI_TIME_STAMP:
1295 if (get_user(opt, (int __user *)optval)) {
1296 err = -EFAULT;
1297 break;
1298 }
1299
1300 if (opt)
1301 hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1302 else
1303 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
1304 break;
1305
1306 case HCI_FILTER:
0878b666
MH
1307 {
1308 struct hci_filter *f = &hci_pi(sk)->filter;
1309
1310 uf.type_mask = f->type_mask;
1311 uf.opcode = f->opcode;
1312 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1313 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1314 }
1315
1da177e4
LT
1316 len = min_t(unsigned int, len, sizeof(uf));
1317 if (copy_from_user(&uf, optval, len)) {
1318 err = -EFAULT;
1319 break;
1320 }
1321
1322 if (!capable(CAP_NET_RAW)) {
1323 uf.type_mask &= hci_sec_filter.type_mask;
1324 uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1325 uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1326 }
1327
1328 {
1329 struct hci_filter *f = &hci_pi(sk)->filter;
1330
1331 f->type_mask = uf.type_mask;
1332 f->opcode = uf.opcode;
1333 *((u32 *) f->event_mask + 0) = uf.event_mask[0];
1334 *((u32 *) f->event_mask + 1) = uf.event_mask[1];
1335 }
8e87d142 1336 break;
1da177e4
LT
1337
1338 default:
1339 err = -ENOPROTOOPT;
1340 break;
1341 }
1342
2f39cdb7 1343done:
1da177e4
LT
1344 release_sock(sk);
1345 return err;
1346}
1347
8fc9ced3
GP
1348static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1349 char __user *optval, int __user *optlen)
1da177e4
LT
1350{
1351 struct hci_ufilter uf;
1352 struct sock *sk = sock->sk;
cedc5469
MH
1353 int len, opt, err = 0;
1354
1355 BT_DBG("sk %p, opt %d", sk, optname);
1da177e4
LT
1356
1357 if (get_user(len, optlen))
1358 return -EFAULT;
1359
cedc5469
MH
1360 lock_sock(sk);
1361
1362 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
c2371e80 1363 err = -EBADFD;
cedc5469
MH
1364 goto done;
1365 }
1366
1da177e4
LT
1367 switch (optname) {
1368 case HCI_DATA_DIR:
1369 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1370 opt = 1;
8e87d142 1371 else
1da177e4
LT
1372 opt = 0;
1373
1374 if (put_user(opt, optval))
cedc5469 1375 err = -EFAULT;
1da177e4
LT
1376 break;
1377
1378 case HCI_TIME_STAMP:
1379 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1380 opt = 1;
8e87d142 1381 else
1da177e4
LT
1382 opt = 0;
1383
1384 if (put_user(opt, optval))
cedc5469 1385 err = -EFAULT;
1da177e4
LT
1386 break;
1387
1388 case HCI_FILTER:
1389 {
1390 struct hci_filter *f = &hci_pi(sk)->filter;
1391
e15ca9a0 1392 memset(&uf, 0, sizeof(uf));
1da177e4
LT
1393 uf.type_mask = f->type_mask;
1394 uf.opcode = f->opcode;
1395 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1396 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1397 }
1398
1399 len = min_t(unsigned int, len, sizeof(uf));
1400 if (copy_to_user(optval, &uf, len))
cedc5469 1401 err = -EFAULT;
1da177e4
LT
1402 break;
1403
1404 default:
cedc5469 1405 err = -ENOPROTOOPT;
1da177e4
LT
1406 break;
1407 }
1408
cedc5469
MH
1409done:
1410 release_sock(sk);
1411 return err;
1da177e4
LT
1412}
1413
/* Protocol operations for HCI sockets.  Connection-oriented entry
 * points (listen, connect, accept, ...) are stubbed out with the
 * generic sock_no_* helpers; polling uses the datagram implementation.
 */
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};
1433
/* Protocol descriptor for HCI sockets; obj_size makes sk_alloc()
 * reserve room for the hci_pinfo state embedded in each sock.
 */
static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
1439
3f378b68
EP
1440static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1441 int kern)
1da177e4
LT
1442{
1443 struct sock *sk;
1444
1445 BT_DBG("sock %p", sock);
1446
1447 if (sock->type != SOCK_RAW)
1448 return -ESOCKTNOSUPPORT;
1449
1450 sock->ops = &hci_sock_ops;
1451
11aa9c28 1452 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
1da177e4
LT
1453 if (!sk)
1454 return -ENOMEM;
1455
1456 sock_init_data(sock, sk);
1457
1458 sock_reset_flag(sk, SOCK_ZAPPED);
1459
1460 sk->sk_protocol = protocol;
1461
1462 sock->state = SS_UNCONNECTED;
1463 sk->sk_state = BT_OPEN;
1464
1465 bt_sock_link(&hci_sk_list, sk);
1466 return 0;
1467}
1468
/* Family descriptor that routes PF_BLUETOOTH socket creation for the
 * HCI protocol to hci_sock_create().
 */
static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};
1474
1da177e4
LT
1475int __init hci_sock_init(void)
1476{
1477 int err;
1478
b0a8e282
MH
1479 BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
1480
1da177e4
LT
1481 err = proto_register(&hci_sk_proto, 0);
1482 if (err < 0)
1483 return err;
1484
1485 err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
f7c86637
MY
1486 if (err < 0) {
1487 BT_ERR("HCI socket registration failed");
1da177e4 1488 goto error;
f7c86637
MY
1489 }
1490
b0316615 1491 err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
f7c86637
MY
1492 if (err < 0) {
1493 BT_ERR("Failed to create HCI proc file");
1494 bt_sock_unregister(BTPROTO_HCI);
1495 goto error;
1496 }
1da177e4 1497
1da177e4
LT
1498 BT_INFO("HCI socket layer initialized");
1499
1500 return 0;
1501
1502error:
1da177e4
LT
1503 proto_unregister(&hci_sk_proto);
1504 return err;
1505}
1506
/* Tear down the HCI socket layer; undoes hci_sock_init() in reverse
 * order (procfs entry, socket family registration, protocol).
 */
void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}
This page took 0.838097 seconds and 5 git commands to generate.