2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI sockets. */
27 #include <linux/export.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_mon.h>
33 #include <net/bluetooth/mgmt.h>
35 #include "mgmt_util.h"
/* Registered HCI management channels; list is guarded by mgmt_chan_list_lock. */
37 static LIST_HEAD(mgmt_chan_list
);
38 static DEFINE_MUTEX(mgmt_chan_list_lock
);
/* Number of open monitor sockets; traffic is only mirrored when non-zero. */
40 static atomic_t monitor_promisc
= ATOMIC_INIT(0);
42 /* ----- HCI socket interface ----- */
/* Access the protocol-private hci_pinfo area overlaid on a struct sock. */
45 #define hci_pi(sk) ((struct hci_pinfo *) sk)
/* Fields of struct hci_pinfo (the struct's opening is not visible in this
 * chunk): per-socket raw packet filter and the bound HCI channel.
 */
50 struct hci_filter filter
;
52 unsigned short channel
;
/* Atomically set flag bit 'nr' in the socket's hci_pinfo flags word. */
56 void hci_sock_set_flag(struct sock
*sk
, int nr
)
58 set_bit(nr
, &hci_pi(sk
)->flags
);
/* Atomically clear flag bit 'nr' in the socket's hci_pinfo flags word. */
61 void hci_sock_clear_flag(struct sock
*sk
, int nr
)
63 clear_bit(nr
, &hci_pi(sk
)->flags
);
/* Return non-zero when flag bit 'nr' is set on the socket. */
66 int hci_sock_test_flag(struct sock
*sk
, int nr
)
68 return test_bit(nr
, &hci_pi(sk
)->flags
);
/* Return the HCI channel (RAW/USER/MONITOR/mgmt) this socket is bound to. */
71 unsigned short hci_sock_get_channel(struct sock
*sk
)
73 return hci_pi(sk
)->channel
;
76 static inline int hci_test_bit(int nr
, const void *addr
)
78 return *((const __u32
*) addr
+ (nr
>> 5)) & ((__u32
) 1 << (nr
& 31));
/* Highest opcode group (OGF) covered by the security filter table. */
82 #define HCI_SFLT_MAX_OGF 5
/* Security filter shape; remaining members (e.g. type/event masks) are not
 * visible in this chunk.
 */
84 struct hci_sec_filter
{
87 __u32 ocf_mask
[HCI_SFLT_MAX_OGF
+ 1][4];
/* Default security filter applied to unprivileged (no CAP_NET_RAW) raw
 * sockets; bitmaps whitelist the permitted packet types, events and OCFs.
 * NOTE(review): several initializer rows/labels are missing from this chunk.
 */
90 static const struct hci_sec_filter hci_sec_filter
= {
94 { 0x1000d9fe, 0x0000b00c },
99 { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
100 /* OGF_LINK_POLICY */
101 { 0x00005200, 0x00000000, 0x00000000, 0x00 },
103 { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
105 { 0x000002be, 0x00000000, 0x00000000, 0x00 },
106 /* OGF_STATUS_PARAM */
107 { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
/* Global list of all open HCI sockets, protected by its embedded rwlock. */
111 static struct bt_sock_list hci_sk_list
= {
112 .lock
= __RW_LOCK_UNLOCKED(hci_sk_list
.lock
)
/* Decide whether 'skb' must be dropped for raw socket 'sk' according to the
 * socket's hci_filter (packet-type mask, event mask, and opcode match for
 * cmd-complete/cmd-status events).
 * NOTE(review): the branch bodies/returns are missing from this chunk.
 */
115 static bool is_filtered_packet(struct sock
*sk
, struct sk_buff
*skb
)
117 struct hci_filter
*flt
;
118 int flt_type
, flt_event
;
121 flt
= &hci_pi(sk
)->filter
;
/* Vendor packets appear to bypass the normal filter path. */
123 if (bt_cb(skb
)->pkt_type
== HCI_VENDOR_PKT
)
126 flt_type
= bt_cb(skb
)->pkt_type
& HCI_FLT_TYPE_BITS
;
128 if (!test_bit(flt_type
, &flt
->type_mask
))
131 /* Extra filter for event packets only */
132 if (bt_cb(skb
)->pkt_type
!= HCI_EVENT_PKT
)
/* First data byte of an event packet is the event code. */
135 flt_event
= (*(__u8
*)skb
->data
& HCI_FLT_EVENT_BITS
)
;
137 if (!hci_test_bit(flt_event
, &flt
->event_mask
))
140 /* Check filter only when opcode is set */
144 if (flt_event
== HCI_EV_CMD_COMPLETE
&&
145 flt
->opcode
!= get_unaligned((__le16
*)(skb
->data
+ 3)))
148 if (flt_event
== HCI_EV_CMD_STATUS
&&
149 flt
->opcode
!= get_unaligned((__le16
*)(skb
->data
+ 4)))
155 /* Send frame to RAW socket */
/* Deliver 'skb' from 'hdev' to every bound raw/user socket, cloning a
 * private copy (with the packet-type byte pushed in front) on demand.
 * NOTE(review): loop-control statements (continue/error paths) and the
 * 'sk' declaration are missing from this chunk.
 */
156 void hci_send_to_sock(struct hci_dev
*hdev
, struct sk_buff
*skb
)
159 struct sk_buff
*skb_copy
= NULL
;
161 BT_DBG("hdev %p len %d", hdev
, skb
->len
);
163 read_lock(&hci_sk_list
.lock
);
165 sk_for_each(sk
, &hci_sk_list
.head
) {
166 struct sk_buff
*nskb
;
168 if (sk
->sk_state
!= BT_BOUND
|| hci_pi(sk
)->hdev
!= hdev
)
171 /* Don't send frame to the socket it came from */
175 if (hci_pi(sk
)->channel
== HCI_CHANNEL_RAW
) {
176 if (is_filtered_packet(sk
, skb
))
178 } else if (hci_pi(sk
)->channel
== HCI_CHANNEL_USER
) {
/* User channel only sees incoming event/ACL/SCO traffic. */
179 if (!bt_cb(skb
)->incoming
)
181 if (bt_cb(skb
)->pkt_type
!= HCI_EVENT_PKT
&&
182 bt_cb(skb
)->pkt_type
!= HCI_ACLDATA_PKT
&&
183 bt_cb(skb
)->pkt_type
!= HCI_SCODATA_PKT
)
186 /* Don't send frame to other channel types */
191 /* Create a private copy with headroom */
192 skb_copy
= __pskb_copy_fclone(skb
, 1, GFP_ATOMIC
, true);
196 /* Put type byte before the data */
197 memcpy(skb_push(skb_copy
, 1), &bt_cb(skb
)->pkt_type
, 1);
200 nskb
= skb_clone(skb_copy
, GFP_ATOMIC
);
204 if (sock_queue_rcv_skb(sk
, nskb
))
208 read_unlock(&hci_sk_list
.lock
);
213 /* Send frame to sockets with specific channel */
/* Clone 'skb' to every bound socket on 'channel' that has socket flag
 * 'flag' set, skipping 'skip_sk'.
 * NOTE(review): the 'sk' declaration and continue/error paths are missing
 * from this chunk.
 */
214 void hci_send_to_channel(unsigned short channel
, struct sk_buff
*skb
,
215 int flag
, struct sock
*skip_sk
)
219 BT_DBG("channel %u len %d", channel
, skb
->len
);
221 read_lock(&hci_sk_list
.lock
);
223 sk_for_each(sk
, &hci_sk_list
.head
) {
224 struct sk_buff
*nskb
;
226 /* Ignore socket without the flag set */
227 if (!hci_sock_test_flag(sk
, flag
))
230 /* Skip the original socket */
234 if (sk
->sk_state
!= BT_BOUND
)
237 if (hci_pi(sk
)->channel
!= channel
)
240 nskb
= skb_clone(skb
, GFP_ATOMIC
);
244 if (sock_queue_rcv_skb(sk
, nskb
))
248 read_unlock(&hci_sk_list
.lock
);
251 /* Send frame to monitor socket */
/* Mirror 'skb' to the monitor channel: prepend an hci_mon_hdr describing
 * opcode/index/length and forward via hci_send_to_channel(). Does nothing
 * when no monitor socket is open.
 * NOTE(review): the 'opcode' declaration, 'break;'/'default:' lines of the
 * switch and the copy-failure path are missing from this chunk.
 */
252 void hci_send_to_monitor(struct hci_dev
*hdev
, struct sk_buff
*skb
)
254 struct sk_buff
*skb_copy
= NULL
;
255 struct hci_mon_hdr
*hdr
;
258 if (!atomic_read(&monitor_promisc
))
261 BT_DBG("hdev %p len %d", hdev
, skb
->len
);
/* Map the HCI packet type (and direction) to a monitor opcode. */
263 switch (bt_cb(skb
)->pkt_type
) {
264 case HCI_COMMAND_PKT
:
265 opcode
= cpu_to_le16(HCI_MON_COMMAND_PKT
);
268 opcode
= cpu_to_le16(HCI_MON_EVENT_PKT
);
270 case HCI_ACLDATA_PKT
:
271 if (bt_cb(skb
)->incoming
)
272 opcode
= cpu_to_le16(HCI_MON_ACL_RX_PKT
);
274 opcode
= cpu_to_le16(HCI_MON_ACL_TX_PKT
);
276 case HCI_SCODATA_PKT
:
277 if (bt_cb(skb
)->incoming
)
278 opcode
= cpu_to_le16(HCI_MON_SCO_RX_PKT
);
280 opcode
= cpu_to_le16(HCI_MON_SCO_TX_PKT
);
286 /* Create a private copy with headroom */
287 skb_copy
= __pskb_copy_fclone(skb
, HCI_MON_HDR_SIZE
, GFP_ATOMIC
, true);
291 /* Put header before the data */
292 hdr
= (void *) skb_push(skb_copy
, HCI_MON_HDR_SIZE
);
293 hdr
->opcode
= opcode
;
294 hdr
->index
= cpu_to_le16(hdev
->id
);
295 hdr
->len
= cpu_to_le16(skb
->len
);
297 hci_send_to_channel(HCI_CHANNEL_MONITOR
, skb_copy
,
298 HCI_SOCK_TRUSTED
, NULL
);
/* Build a monitor-channel skb describing a device lifecycle event
 * (new-index payload with type/bdaddr/name, or an empty del-index), with
 * the hci_mon_hdr pushed in front and a timestamp applied.
 * NOTE(review): the switch/case scaffolding, 'skb'/'opcode' declarations,
 * allocation-failure checks and the final return are missing from this chunk.
 */
302 static struct sk_buff
*create_monitor_event(struct hci_dev
*hdev
, int event
)
304 struct hci_mon_hdr
*hdr
;
305 struct hci_mon_new_index
*ni
;
311 skb
= bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE
, GFP_ATOMIC
);
315 ni
= (void *) skb_put(skb
, HCI_MON_NEW_INDEX_SIZE
);
316 ni
->type
= hdev
->dev_type
;
318 bacpy(&ni
->bdaddr
, &hdev
->bdaddr
);
319 memcpy(ni
->name
, hdev
->name
, 8);
321 opcode
= cpu_to_le16(HCI_MON_NEW_INDEX
);
/* Unregister event carries no payload. */
325 skb
= bt_skb_alloc(0, GFP_ATOMIC
);
329 opcode
= cpu_to_le16(HCI_MON_DEL_INDEX
);
336 __net_timestamp(skb
);
338 hdr
= (void *) skb_push(skb
, HCI_MON_HDR_SIZE
);
339 hdr
->opcode
= opcode
;
340 hdr
->index
= cpu_to_le16(hdev
->id
);
341 hdr
->len
= cpu_to_le16(skb
->len
- HCI_MON_HDR_SIZE
);
/* Replay a HCI_DEV_REG monitor event for every registered controller to a
 * freshly bound monitor socket, so it learns the current device set.
 * NOTE(review): the 'skb' declaration and NULL/queue-failure handling are
 * missing from this chunk.
 */
346 static void send_monitor_replay(struct sock
*sk
)
348 struct hci_dev
*hdev
;
350 read_lock(&hci_dev_list_lock
);
352 list_for_each_entry(hdev
, &hci_dev_list
, list
) {
355 skb
= create_monitor_event(hdev
, HCI_DEV_REG
);
359 if (sock_queue_rcv_skb(sk
, skb
))
363 read_unlock(&hci_dev_list_lock
);
366 /* Generate internal stack event */
/* Synthesize an HCI_EV_STACK_INTERNAL event carrying 'dlen' bytes of
 * 'data' and inject it into the raw-socket delivery path as if it were an
 * incoming event from 'hdev'.
 * NOTE(review): the 'skb' declaration, allocation check and the ev->type
 * assignment are missing from this chunk.
 */
367 static void hci_si_event(struct hci_dev
*hdev
, int type
, int dlen
, void *data
)
369 struct hci_event_hdr
*hdr
;
370 struct hci_ev_stack_internal
*ev
;
373 skb
= bt_skb_alloc(HCI_EVENT_HDR_SIZE
+ sizeof(*ev
) + dlen
, GFP_ATOMIC
);
377 hdr
= (void *) skb_put(skb
, HCI_EVENT_HDR_SIZE
);
378 hdr
->evt
= HCI_EV_STACK_INTERNAL
;
379 hdr
->plen
= sizeof(*ev
) + dlen
;
381 ev
= (void *) skb_put(skb
, sizeof(*ev
) + dlen
);
383 memcpy(ev
->data
, data
, dlen
);
/* Mark as incoming so raw sockets accept it like real controller data. */
385 bt_cb(skb
)->incoming
= 1;
386 __net_timestamp(skb
);
388 bt_cb(skb
)->pkt_type
= HCI_EVENT_PKT
;
389 hci_send_to_sock(hdev
, skb
);
/* Broadcast a device lifecycle event: mirror it to the monitor channel,
 * emit an internal stack event to raw sockets, and on unregister detach
 * every socket still bound to the vanished device.
 * NOTE(review): the 'skb'/'sk' declarations, kfree_skb and
 * bh_unlock_sock lines are missing from this chunk.
 */
393 void hci_sock_dev_event(struct hci_dev
*hdev
, int event
)
395 struct hci_ev_si_device ev
;
397 BT_DBG("hdev %s event %d", hdev
->name
, event
);
399 /* Send event to monitor */
400 if (atomic_read(&monitor_promisc
)) {
403 skb
= create_monitor_event(hdev
, event
);
405 hci_send_to_channel(HCI_CHANNEL_MONITOR
, skb
,
406 HCI_SOCK_TRUSTED
, NULL
);
411 /* Send event to sockets */
413 ev
.dev_id
= hdev
->id
;
414 hci_si_event(NULL
, HCI_EV_SI_DEVICE
, sizeof(ev
), &ev
);
416 if (event
== HCI_DEV_UNREG
) {
419 /* Detach sockets from device */
420 read_lock(&hci_sk_list
.lock
);
421 sk_for_each(sk
, &hci_sk_list
.head
) {
422 bh_lock_sock_nested(sk
);
423 if (hci_pi(sk
)->hdev
== hdev
) {
424 hci_pi(sk
)->hdev
= NULL
;
/* Reset the orphaned socket and wake any waiters. */
426 sk
->sk_state
= BT_OPEN
;
427 sk
->sk_state_change(sk
);
433 read_unlock(&hci_sk_list
.lock
);
/* Find a registered management channel by number. Caller must hold
 * mgmt_chan_list_lock.
 * NOTE(review): the match/return lines are missing from this chunk.
 */
437 static struct hci_mgmt_chan
*__hci_mgmt_chan_find(unsigned short channel
)
439 struct hci_mgmt_chan
*c
;
441 list_for_each_entry(c
, &mgmt_chan_list
, list
) {
442 if (c
->channel
== channel
)
/* Locked wrapper around __hci_mgmt_chan_find(). */
449 static struct hci_mgmt_chan
*hci_mgmt_chan_find(unsigned short channel
)
451 struct hci_mgmt_chan
*c
;
453 mutex_lock(&mgmt_chan_list_lock
);
454 c
= __hci_mgmt_chan_find(channel
);
455 mutex_unlock(&mgmt_chan_list_lock
);
/* Register a management channel; rejects channel numbers below
 * HCI_CHANNEL_CONTROL and duplicate registrations.
 * NOTE(review): the error-return statements are missing from this chunk.
 */
460 int hci_mgmt_chan_register(struct hci_mgmt_chan
*c
)
462 if (c
->channel
< HCI_CHANNEL_CONTROL
)
465 mutex_lock(&mgmt_chan_list_lock
);
466 if (__hci_mgmt_chan_find(c
->channel
)) {
467 mutex_unlock(&mgmt_chan_list_lock
);
471 list_add_tail(&c
->list
, &mgmt_chan_list
);
473 mutex_unlock(&mgmt_chan_list_lock
);
477 EXPORT_SYMBOL(hci_mgmt_chan_register
);
/* Unregister a management channel under mgmt_chan_list_lock.
 * NOTE(review): the list_del line is missing from this chunk.
 */
479 void hci_mgmt_chan_unregister(struct hci_mgmt_chan
*c
)
481 mutex_lock(&mgmt_chan_list_lock
);
483 mutex_unlock(&mgmt_chan_list_lock
);
485 EXPORT_SYMBOL(hci_mgmt_chan_unregister
);
/* Release an HCI socket: drop the monitor count, unlink from the global
 * list, give back user-channel exclusive access (closing the controller),
 * drop the device reference and purge queued skbs.
 * NOTE(review): NULL checks, hci_dev_put, sock_orphan/sock_put and the
 * final return are missing from this chunk.
 */
487 static int hci_sock_release(struct socket
*sock
)
489 struct sock
*sk
= sock
->sk
;
490 struct hci_dev
*hdev
;
492 BT_DBG("sock %p sk %p", sock
, sk
);
497 hdev
= hci_pi(sk
)->hdev
;
499 if (hci_pi(sk
)->channel
== HCI_CHANNEL_MONITOR
)
500 atomic_dec(&monitor_promisc
);
502 bt_sock_unlink(&hci_sk_list
, sk
);
505 if (hci_pi(sk
)->channel
== HCI_CHANNEL_USER
) {
506 /* When releasing an user channel exclusive access,
507 * call hci_dev_do_close directly instead of calling
508 * hci_dev_close to ensure the exclusive access will
509 * be released and the controller brought back down.
511 * The checking of HCI_AUTO_OFF is not needed in this
512 * case since it will have been cleared already when
513 * opening the user channel.
515 hci_dev_do_close(hdev
);
516 hci_dev_clear_flag(hdev
, HCI_USER_CHANNEL
);
517 mgmt_index_added(hdev
);
520 atomic_dec(&hdev
->promisc
);
526 skb_queue_purge(&sk
->sk_receive_queue
);
527 skb_queue_purge(&sk
->sk_write_queue
);
/* Copy a bdaddr_t from userspace and add it to the device's blacklist
 * (BR/EDR type) under the device lock.
 * NOTE(review): the 'bdaddr'/'err' declarations, -EFAULT return,
 * hci_dev_lock and the final return are missing from this chunk.
 */
533 static int hci_sock_blacklist_add(struct hci_dev
*hdev
, void __user
*arg
)
538 if (copy_from_user(&bdaddr
, arg
, sizeof(bdaddr
)))
543 err
= hci_bdaddr_list_add(&hdev
->blacklist
, &bdaddr
, BDADDR_BREDR
);
545 hci_dev_unlock(hdev
);
/* Copy a bdaddr_t from userspace and remove it from the device's blacklist
 * (BR/EDR type) under the device lock; mirror image of
 * hci_sock_blacklist_add().
 * NOTE(review): declarations, -EFAULT return, hci_dev_lock and the final
 * return are missing from this chunk.
 */
550 static int hci_sock_blacklist_del(struct hci_dev
*hdev
, void __user
*arg
)
555 if (copy_from_user(&bdaddr
, arg
, sizeof(bdaddr
)))
560 err
= hci_bdaddr_list_del(&hdev
->blacklist
, &bdaddr
, BDADDR_BREDR
);
562 hci_dev_unlock(hdev
);
567 /* Ioctls that require bound socket */
/* Dispatch device-specific ioctls for a socket bound to an hdev; rejects
 * user-channel-claimed, unconfigured and non-BR/EDR devices, and gates the
 * privileged commands on CAP_NET_ADMIN.
 * NOTE(review): the 'arg' parameter, the switch/case labels and error
 * returns are missing from this chunk.
 */
568 static int hci_sock_bound_ioctl(struct sock
*sk
, unsigned int cmd
,
571 struct hci_dev
*hdev
= hci_pi(sk
)->hdev
;
576 if (hci_dev_test_flag(hdev
, HCI_USER_CHANNEL
))
579 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
))
582 if (hdev
->dev_type
!= HCI_BREDR
)
587 if (!capable(CAP_NET_ADMIN
))
592 return hci_get_conn_info(hdev
, (void __user
*) arg
);
595 return hci_get_auth_info(hdev
, (void __user
*) arg
);
598 if (!capable(CAP_NET_ADMIN
))
600 return hci_sock_blacklist_add(hdev
, (void __user
*) arg
);
603 if (!capable(CAP_NET_ADMIN
))
605 return hci_sock_blacklist_del(hdev
, (void __user
*) arg
);
/* Top-level ioctl handler for HCI sockets: raw-channel only; serves the
 * global get/open/close/reset/inquiry commands (admin ones gated on
 * CAP_NET_ADMIN) and falls through to hci_sock_bound_ioctl() under the
 * socket lock for device-bound commands.
 * NOTE(review): the 'arg' parameter, switch/case labels, lock/release
 * calls and error returns are missing from this chunk.
 */
611 static int hci_sock_ioctl(struct socket
*sock
, unsigned int cmd
,
614 void __user
*argp
= (void __user
*) arg
;
615 struct sock
*sk
= sock
->sk
;
618 BT_DBG("cmd %x arg %lx", cmd
, arg
);
622 if (hci_pi(sk
)->channel
!= HCI_CHANNEL_RAW
) {
631 return hci_get_dev_list(argp
);
634 return hci_get_dev_info(argp
);
637 return hci_get_conn_list(argp
);
640 if (!capable(CAP_NET_ADMIN
))
642 return hci_dev_open(arg
);
645 if (!capable(CAP_NET_ADMIN
))
647 return hci_dev_close(arg
);
650 if (!capable(CAP_NET_ADMIN
))
652 return hci_dev_reset(arg
);
655 if (!capable(CAP_NET_ADMIN
))
657 return hci_dev_reset_stat(arg
);
667 if (!capable(CAP_NET_ADMIN
))
669 return hci_dev_cmd(cmd
, argp
);
672 return hci_inquiry(argp
);
677 err
= hci_sock_bound_ioctl(sk
, cmd
, arg
);
/* Bind an HCI socket to a (device, channel) pair. Per channel:
 *  - RAW: optional device, bumps hdev->promisc;
 *  - USER: exclusive controller access (CAP_NET_ADMIN), takes the device
 *    out of mgmt and opens it;
 *  - MONITOR: CAP_NET_RAW, replays existing devices, bumps monitor count;
 *  - otherwise: a registered management channel, with trust/event flags.
 * Finally records the channel and moves the socket to BT_BOUND.
 * NOTE(review): 'len'/'err' declarations, lock/unlock, break/goto and
 * error-return lines are missing from this chunk.
 */
684 static int hci_sock_bind(struct socket
*sock
, struct sockaddr
*addr
,
687 struct sockaddr_hci haddr
;
688 struct sock
*sk
= sock
->sk
;
689 struct hci_dev
*hdev
= NULL
;
692 BT_DBG("sock %p sk %p", sock
, sk
);
/* Copy at most sizeof(haddr) bytes of the user-supplied address. */
697 memset(&haddr
, 0, sizeof(haddr
));
698 len
= min_t(unsigned int, sizeof(haddr
), addr_len
);
699 memcpy(&haddr
, addr
, len
);
701 if (haddr
.hci_family
!= AF_BLUETOOTH
)
706 if (sk
->sk_state
== BT_BOUND
) {
711 switch (haddr
.hci_channel
) {
712 case HCI_CHANNEL_RAW
:
713 if (hci_pi(sk
)->hdev
) {
718 if (haddr
.hci_dev
!= HCI_DEV_NONE
) {
719 hdev
= hci_dev_get(haddr
.hci_dev
);
725 atomic_inc(&hdev
->promisc
);
728 hci_pi(sk
)->hdev
= hdev
;
731 case HCI_CHANNEL_USER
:
732 if (hci_pi(sk
)->hdev
) {
737 if (haddr
.hci_dev
== HCI_DEV_NONE
) {
742 if (!capable(CAP_NET_ADMIN
)) {
747 hdev
= hci_dev_get(haddr
.hci_dev
);
/* Refuse exclusive access while the device is busy or already up. */
753 if (test_bit(HCI_INIT
, &hdev
->flags
) ||
754 hci_dev_test_flag(hdev
, HCI_SETUP
) ||
755 hci_dev_test_flag(hdev
, HCI_CONFIG
) ||
756 (!hci_dev_test_flag(hdev
, HCI_AUTO_OFF
) &&
757 test_bit(HCI_UP
, &hdev
->flags
))) {
763 if (hci_dev_test_and_set_flag(hdev
, HCI_USER_CHANNEL
)) {
769 mgmt_index_removed(hdev
);
771 err
= hci_dev_open(hdev
->id
);
773 if (err
== -EALREADY
) {
774 /* In case the transport is already up and
775 * running, clear the error here.
777 * This can happen when opening an user
778 * channel and HCI_AUTO_OFF grace period
783 hci_dev_clear_flag(hdev
, HCI_USER_CHANNEL
);
784 mgmt_index_added(hdev
);
790 atomic_inc(&hdev
->promisc
);
792 hci_pi(sk
)->hdev
= hdev
;
795 case HCI_CHANNEL_MONITOR
:
796 if (haddr
.hci_dev
!= HCI_DEV_NONE
) {
801 if (!capable(CAP_NET_RAW
)) {
806 /* The monitor interface is restricted to CAP_NET_RAW
807 * capabilities and with that implicitly trusted.
809 hci_sock_set_flag(sk
, HCI_SOCK_TRUSTED
);
811 send_monitor_replay(sk
);
813 atomic_inc(&monitor_promisc
);
817 if (!hci_mgmt_chan_find(haddr
.hci_channel
)) {
822 if (haddr
.hci_dev
!= HCI_DEV_NONE
) {
827 /* Users with CAP_NET_ADMIN capabilities are allowed
828 * access to all management commands and events. For
829 * untrusted users the interface is restricted and
830 * also only untrusted events are sent.
832 if (capable(CAP_NET_ADMIN
))
833 hci_sock_set_flag(sk
, HCI_SOCK_TRUSTED
);
835 /* At the moment the index and unconfigured index events
836 * are enabled unconditionally. Setting them on each
837 * socket when binding keeps this functionality. They
838 * however might be cleared later and then sending of these
839 * events will be disabled, but that is then intentional.
841 * This also enables generic events that are safe to be
842 * received by untrusted users. Example for such events
843 * are changes to settings, class of device, name etc.
845 if (haddr
.hci_channel
== HCI_CHANNEL_CONTROL
) {
846 hci_sock_set_flag(sk
, HCI_MGMT_INDEX_EVENTS
);
847 hci_sock_set_flag(sk
, HCI_MGMT_UNCONF_INDEX_EVENTS
);
848 hci_sock_set_flag(sk
, HCI_MGMT_GENERIC_EVENTS
);
854 hci_pi(sk
)->channel
= haddr
.hci_channel
;
855 sk
->sk_state
= BT_BOUND
;
/* Fill in a sockaddr_hci describing the socket's bound device id and
 * channel; 'peer' is unused for this address family.
 * NOTE(review): the socket lock, the hdev NULL check and the return are
 * missing from this chunk.
 */
862 static int hci_sock_getname(struct socket
*sock
, struct sockaddr
*addr
,
863 int *addr_len
, int peer
)
865 struct sockaddr_hci
*haddr
= (struct sockaddr_hci
*) addr
;
866 struct sock
*sk
= sock
->sk
;
867 struct hci_dev
*hdev
;
870 BT_DBG("sock %p sk %p", sock
, sk
);
877 hdev
= hci_pi(sk
)->hdev
;
883 *addr_len
= sizeof(*haddr
);
884 haddr
->hci_family
= AF_BLUETOOTH
;
885 haddr
->hci_dev
= hdev
->id
;
886 haddr
->hci_channel
= hci_pi(sk
)->channel
;
/* Attach ancillary data (direction and/or timestamp control messages) to a
 * received message, according to the socket's cmsg mask. Handles the
 * 32-bit-compat timeval layout when MSG_CMSG_COMPAT is set.
 * NOTE(review): the 'skb' parameter, 'tv'/'data'/'len' declarations and
 * the non-compat branch are missing from this chunk.
 */
893 static void hci_sock_cmsg(struct sock
*sk
, struct msghdr
*msg
,
896 __u32 mask
= hci_pi(sk
)->cmsg_mask
;
898 if (mask
& HCI_CMSG_DIR
) {
899 int incoming
= bt_cb(skb
)->incoming
;
900 put_cmsg(msg
, SOL_HCI
, HCI_CMSG_DIR
, sizeof(incoming
),
904 if (mask
& HCI_CMSG_TSTAMP
) {
906 struct compat_timeval ctv
;
912 skb_get_timestamp(skb
, &tv
);
917 if (!COMPAT_USE_64BIT_TIME
&&
918 (msg
->msg_flags
& MSG_CMSG_COMPAT
)) {
919 ctv
.tv_sec
= tv
.tv_sec
;
920 ctv
.tv_usec
= tv
.tv_usec
;
926 put_cmsg(msg
, SOL_HCI
, HCI_CMSG_TSTAMP
, len
, data
);
/* Receive one queued frame: dequeue a datagram, copy it to the caller
 * (setting MSG_TRUNC when it doesn't fit), then attach per-channel
 * ancillary data (raw cmsg, or timestamps for user/monitor/mgmt channels).
 * Returns the error, or the number of bytes copied.
 * NOTE(review): 'flags' parameter, 'skb'/'copied'/'err' declarations,
 * error returns and 'break' lines are missing from this chunk.
 */
930 static int hci_sock_recvmsg(struct socket
*sock
, struct msghdr
*msg
, size_t len
,
933 int noblock
= flags
& MSG_DONTWAIT
;
934 struct sock
*sk
= sock
->sk
;
938 BT_DBG("sock %p, sk %p", sock
, sk
);
940 if (flags
& (MSG_OOB
))
943 if (sk
->sk_state
== BT_CLOSED
)
946 skb
= skb_recv_datagram(sk
, flags
, noblock
, &err
);
952 msg
->msg_flags
|= MSG_TRUNC
;
956 skb_reset_transport_header(skb
);
957 err
= skb_copy_datagram_msg(skb
, 0, msg
, copied
);
959 switch (hci_pi(sk
)->channel
) {
960 case HCI_CHANNEL_RAW
:
961 hci_sock_cmsg(sk
, msg
, skb
);
963 case HCI_CHANNEL_USER
:
964 case HCI_CHANNEL_MONITOR
:
965 sock_recv_timestamp(msg
, sk
, skb
);
968 if (hci_mgmt_chan_find(hci_pi(sk
)->channel
))
969 sock_recv_timestamp(msg
, sk
, skb
);
973 skb_free_datagram(sk
, skb
);
975 return err
? : copied
;
/* Parse and dispatch one management command from 'msg' on channel 'chan':
 * copies the message, validates header/length, looks up the handler,
 * enforces trust and per-device state restrictions, resolves the target
 * hdev (or requires none), validates the payload length and finally calls
 * the handler with the command parameters.
 * NOTE(review): 'buf'/'cp'/'err' declarations, the hdr assignment, gotos,
 * cleanup (kfree/hci_dev_put) and the final return are missing from this
 * chunk.
 */
978 static int hci_mgmt_cmd(struct hci_mgmt_chan
*chan
, struct sock
*sk
,
979 struct msghdr
*msg
, size_t msglen
)
983 struct mgmt_hdr
*hdr
;
984 u16 opcode
, index
, len
;
985 struct hci_dev
*hdev
= NULL
;
986 const struct hci_mgmt_handler
*handler
;
987 bool var_len
, no_hdev
;
990 BT_DBG("got %zu bytes", msglen
);
992 if (msglen
< sizeof(*hdr
))
995 buf
= kmalloc(msglen
, GFP_KERNEL
);
999 if (memcpy_from_msg(buf
, msg
, msglen
)) {
1005 opcode
= __le16_to_cpu(hdr
->opcode
);
1006 index
= __le16_to_cpu(hdr
->index
);
1007 len
= __le16_to_cpu(hdr
->len
);
/* Declared payload length must match what was actually sent. */
1009 if (len
!= msglen
- sizeof(*hdr
)) {
1014 if (opcode
>= chan
->handler_count
||
1015 chan
->handlers
[opcode
].func
== NULL
) {
1016 BT_DBG("Unknown op %u", opcode
);
1017 err
= mgmt_cmd_status(sk
, index
, opcode
,
1018 MGMT_STATUS_UNKNOWN_COMMAND
);
1022 handler
= &chan
->handlers
[opcode
];
1024 if (!hci_sock_test_flag(sk
, HCI_SOCK_TRUSTED
) &&
1025 !(handler
->flags
& HCI_MGMT_UNTRUSTED
)) {
1026 err
= mgmt_cmd_status(sk
, index
, opcode
,
1027 MGMT_STATUS_PERMISSION_DENIED
);
1031 if (index
!= MGMT_INDEX_NONE
) {
1032 hdev
= hci_dev_get(index
);
1034 err
= mgmt_cmd_status(sk
, index
, opcode
,
1035 MGMT_STATUS_INVALID_INDEX
);
1039 if (hci_dev_test_flag(hdev
, HCI_SETUP
) ||
1040 hci_dev_test_flag(hdev
, HCI_CONFIG
) ||
1041 hci_dev_test_flag(hdev
, HCI_USER_CHANNEL
)) {
1042 err
= mgmt_cmd_status(sk
, index
, opcode
,
1043 MGMT_STATUS_INVALID_INDEX
);
1047 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
) &&
1048 !(handler
->flags
& HCI_MGMT_UNCONFIGURED
)) {
1049 err
= mgmt_cmd_status(sk
, index
, opcode
,
1050 MGMT_STATUS_INVALID_INDEX
);
/* Handler's no-hdev expectation must agree with whether we got one. */
1055 no_hdev
= (handler
->flags
& HCI_MGMT_NO_HDEV
);
1056 if (no_hdev
!= !hdev
) {
1057 err
= mgmt_cmd_status(sk
, index
, opcode
,
1058 MGMT_STATUS_INVALID_INDEX
);
1062 var_len
= (handler
->flags
& HCI_MGMT_VAR_LEN
);
1063 if ((var_len
&& len
< handler
->data_len
) ||
1064 (!var_len
&& len
!= handler
->data_len
)) {
1065 err
= mgmt_cmd_status(sk
, index
, opcode
,
1066 MGMT_STATUS_INVALID_PARAMS
);
1070 if (hdev
&& chan
->hdev_init
)
1071 chan
->hdev_init(sk
, hdev
);
1073 cp
= buf
+ sizeof(*hdr
);
1075 err
= handler
->func(sk
, hdev
, cp
, len
);
/* Send one frame. Management channels dispatch to hci_mgmt_cmd(); raw and
 * user channels copy the payload into an skb, take the packet type from
 * the first byte, and queue it for transmission — user channel after a
 * type check, raw commands through the security filter (unfiltered with
 * CAP_NET_RAW), stand-alone commands onto cmd_q, other data onto raw_q.
 * NOTE(review): 'len' parameter, 'err' declaration, lock/release calls,
 * gotos/returns and several error paths are missing from this chunk.
 */
1089 static int hci_sock_sendmsg(struct socket
*sock
, struct msghdr
*msg
,
1092 struct sock
*sk
= sock
->sk
;
1093 struct hci_mgmt_chan
*chan
;
1094 struct hci_dev
*hdev
;
1095 struct sk_buff
*skb
;
1098 BT_DBG("sock %p sk %p", sock
, sk
);
1100 if (msg
->msg_flags
& MSG_OOB
)
1103 if (msg
->msg_flags
& ~(MSG_DONTWAIT
|MSG_NOSIGNAL
|MSG_ERRQUEUE
))
1106 if (len
< 4 || len
> HCI_MAX_FRAME_SIZE
)
1111 switch (hci_pi(sk
)->channel
) {
1112 case HCI_CHANNEL_RAW
:
1113 case HCI_CHANNEL_USER
:
1115 case HCI_CHANNEL_MONITOR
:
1119 mutex_lock(&mgmt_chan_list_lock
);
1120 chan
= __hci_mgmt_chan_find(hci_pi(sk
)->channel
);
1122 err
= hci_mgmt_cmd(chan
, sk
, msg
, len
);
1126 mutex_unlock(&mgmt_chan_list_lock
);
1130 hdev
= hci_pi(sk
)->hdev
;
1136 if (!test_bit(HCI_UP
, &hdev
->flags
)) {
1141 skb
= bt_skb_send_alloc(sk
, len
, msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1145 if (memcpy_from_msg(skb_put(skb
, len
), msg
, len
)) {
/* First payload byte encodes the HCI packet type. */
1150 bt_cb(skb
)->pkt_type
= *((unsigned char *) skb
->data
);
1153 if (hci_pi(sk
)->channel
== HCI_CHANNEL_USER
) {
1154 /* No permission check is needed for user channel
1155 * since that gets enforced when binding the socket.
1157 * However check that the packet type is valid.
1159 if (bt_cb(skb
)->pkt_type
!= HCI_COMMAND_PKT
&&
1160 bt_cb(skb
)->pkt_type
!= HCI_ACLDATA_PKT
&&
1161 bt_cb(skb
)->pkt_type
!= HCI_SCODATA_PKT
) {
1166 skb_queue_tail(&hdev
->raw_q
, skb
);
1167 queue_work(hdev
->workqueue
, &hdev
->tx_work
);
1168 } else if (bt_cb(skb
)->pkt_type
== HCI_COMMAND_PKT
) {
1169 u16 opcode
= get_unaligned_le16(skb
->data
);
1170 u16 ogf
= hci_opcode_ogf(opcode
);
1171 u16 ocf
= hci_opcode_ocf(opcode
);
/* Unprivileged senders may only issue whitelisted commands. */
1173 if (((ogf
> HCI_SFLT_MAX_OGF
) ||
1174 !hci_test_bit(ocf
& HCI_FLT_OCF_BITS
,
1175 &hci_sec_filter
.ocf_mask
[ogf
])) &&
1176 !capable(CAP_NET_RAW
)) {
1182 skb_queue_tail(&hdev
->raw_q
, skb
);
1183 queue_work(hdev
->workqueue
, &hdev
->tx_work
);
1185 /* Stand-alone HCI commands must be flagged as
1186 * single-command requests.
1188 bt_cb(skb
)->req
.start
= true;
1190 skb_queue_tail(&hdev
->cmd_q
, skb
);
1191 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
1194 if (!capable(CAP_NET_RAW
)) {
1199 skb_queue_tail(&hdev
->raw_q
, skb
);
1200 queue_work(hdev
->workqueue
, &hdev
->tx_work
);
/* Set SOL_HCI socket options (raw channel only): toggle the direction and
 * timestamp cmsg bits, or install a packet filter — clamped to the
 * security filter for callers without CAP_NET_RAW.
 * NOTE(review): the 'level'/'optname' switch labels, socket lock, gotos
 * and the final return are missing from this chunk.
 */
1214 static int hci_sock_setsockopt(struct socket
*sock
, int level
, int optname
,
1215 char __user
*optval
, unsigned int len
)
1217 struct hci_ufilter uf
= { .opcode
= 0 };
1218 struct sock
*sk
= sock
->sk
;
1219 int err
= 0, opt
= 0;
1221 BT_DBG("sk %p, opt %d", sk
, optname
);
1225 if (hci_pi(sk
)->channel
!= HCI_CHANNEL_RAW
) {
1232 if (get_user(opt
, (int __user
*)optval
)) {
1238 hci_pi(sk
)->cmsg_mask
|= HCI_CMSG_DIR
;
1240 hci_pi(sk
)->cmsg_mask
&= ~HCI_CMSG_DIR
;
1243 case HCI_TIME_STAMP
:
1244 if (get_user(opt
, (int __user
*)optval
)) {
1250 hci_pi(sk
)->cmsg_mask
|= HCI_CMSG_TSTAMP
;
1252 hci_pi(sk
)->cmsg_mask
&= ~HCI_CMSG_TSTAMP
;
/* Seed 'uf' with the current filter before the (possibly short) copy. */
1257 struct hci_filter
*f
= &hci_pi(sk
)->filter
;
1259 uf
.type_mask
= f
->type_mask
;
1260 uf
.opcode
= f
->opcode
;
1261 uf
.event_mask
[0] = *((u32
*) f
->event_mask
+ 0);
1262 uf
.event_mask
[1] = *((u32
*) f
->event_mask
+ 1);
1265 len
= min_t(unsigned int, len
, sizeof(uf
));
1266 if (copy_from_user(&uf
, optval
, len
)) {
/* Unprivileged callers cannot widen beyond the security filter. */
1271 if (!capable(CAP_NET_RAW
)) {
1272 uf
.type_mask
&= hci_sec_filter
.type_mask
;
1273 uf
.event_mask
[0] &= *((u32
*) hci_sec_filter
.event_mask
+ 0);
1274 uf
.event_mask
[1] &= *((u32
*) hci_sec_filter
.event_mask
+ 1);
1278 struct hci_filter
*f
= &hci_pi(sk
)->filter
;
1280 f
->type_mask
= uf
.type_mask
;
1281 f
->opcode
= uf
.opcode
;
1282 *((u32
*) f
->event_mask
+ 0) = uf
.event_mask
[0];
1283 *((u32
*) f
->event_mask
+ 1) = uf
.event_mask
[1];
/* Read back SOL_HCI socket options (raw channel only): the direction and
 * timestamp cmsg flags as ints, or the current filter as an hci_ufilter,
 * truncated to the caller's buffer length.
 * NOTE(review): switch labels, opt assignments, socket lock, gotos and
 * the final return are missing from this chunk.
 */
1297 static int hci_sock_getsockopt(struct socket
*sock
, int level
, int optname
,
1298 char __user
*optval
, int __user
*optlen
)
1300 struct hci_ufilter uf
;
1301 struct sock
*sk
= sock
->sk
;
1302 int len
, opt
, err
= 0;
1304 BT_DBG("sk %p, opt %d", sk
, optname
);
1306 if (get_user(len
, optlen
))
1311 if (hci_pi(sk
)->channel
!= HCI_CHANNEL_RAW
) {
1318 if (hci_pi(sk
)->cmsg_mask
& HCI_CMSG_DIR
)
1323 if (put_user(opt
, optval
))
1327 case HCI_TIME_STAMP
:
1328 if (hci_pi(sk
)->cmsg_mask
& HCI_CMSG_TSTAMP
)
1333 if (put_user(opt
, optval
))
1339 struct hci_filter
*f
= &hci_pi(sk
)->filter
;
1341 memset(&uf
, 0, sizeof(uf
));
1342 uf
.type_mask
= f
->type_mask
;
1343 uf
.opcode
= f
->opcode
;
1344 uf
.event_mask
[0] = *((u32
*) f
->event_mask
+ 0);
1345 uf
.event_mask
[1] = *((u32
*) f
->event_mask
+ 1);
1348 len
= min_t(unsigned int, len
, sizeof(uf
));
1349 if (copy_to_user(optval
, &uf
, len
))
/* proto_ops vtable wiring the HCI socket callbacks; unsupported
 * operations map to the sock_no_* stubs.
 */
1363 static const struct proto_ops hci_sock_ops
= {
1364 .family
= PF_BLUETOOTH
,
1365 .owner
= THIS_MODULE
,
1366 .release
= hci_sock_release
,
1367 .bind
= hci_sock_bind
,
1368 .getname
= hci_sock_getname
,
1369 .sendmsg
= hci_sock_sendmsg
,
1370 .recvmsg
= hci_sock_recvmsg
,
1371 .ioctl
= hci_sock_ioctl
,
1372 .poll
= datagram_poll
,
1373 .listen
= sock_no_listen
,
1374 .shutdown
= sock_no_shutdown
,
1375 .setsockopt
= hci_sock_setsockopt
,
1376 .getsockopt
= hci_sock_getsockopt
,
1377 .connect
= sock_no_connect
,
1378 .socketpair
= sock_no_socketpair
,
1379 .accept
= sock_no_accept
,
1380 .mmap
= sock_no_mmap
/* Protocol descriptor; obj_size makes sk_alloc() reserve room for the
 * hci_pinfo private area.
 */
1383 static struct proto hci_sk_proto
= {
1385 .owner
= THIS_MODULE
,
1386 .obj_size
= sizeof(struct hci_pinfo
)
/* Create a new PF_BLUETOOTH/HCI socket: SOCK_RAW only; allocates the sock,
 * initializes its state and links it into hci_sk_list.
 * NOTE(review): the 'sk' declaration, the -ENOMEM check and the final
 * return are missing from this chunk.
 */
1389 static int hci_sock_create(struct net
*net
, struct socket
*sock
, int protocol
,
1394 BT_DBG("sock %p", sock
);
1396 if (sock
->type
!= SOCK_RAW
)
1397 return -ESOCKTNOSUPPORT
;
1399 sock
->ops
= &hci_sock_ops
;
1401 sk
= sk_alloc(net
, PF_BLUETOOTH
, GFP_ATOMIC
, &hci_sk_proto
, kern
);
1405 sock_init_data(sock
, sk
);
1407 sock_reset_flag(sk
, SOCK_ZAPPED
);
1409 sk
->sk_protocol
= protocol
;
1411 sock
->state
= SS_UNCONNECTED
;
1412 sk
->sk_state
= BT_OPEN
;
1414 bt_sock_link(&hci_sk_list
, sk
);
/* Family ops registered with bt_sock_register() for BTPROTO_HCI. */
1418 static const struct net_proto_family hci_sock_family_ops
= {
1419 .family
= PF_BLUETOOTH
,
1420 .owner
= THIS_MODULE
,
1421 .create
= hci_sock_create
,
/* Module init: register the proto, the BTPROTO_HCI family and the procfs
 * entry; unwinds on failure.
 * NOTE(review): 'err' declaration, error gotos/labels and the success
 * return are missing from this chunk.
 */
1424 int __init
hci_sock_init(void)
/* sockaddr_hci must fit inside the generic sockaddr passed by userspace. */
1428 BUILD_BUG_ON(sizeof(struct sockaddr_hci
) > sizeof(struct sockaddr
));
1430 err
= proto_register(&hci_sk_proto
, 0);
1434 err
= bt_sock_register(BTPROTO_HCI
, &hci_sock_family_ops
);
1436 BT_ERR("HCI socket registration failed");
1440 err
= bt_procfs_init(&init_net
, "hci", &hci_sk_list
, NULL
);
1442 BT_ERR("Failed to create HCI proc file");
1443 bt_sock_unregister(BTPROTO_HCI
);
1447 BT_INFO("HCI socket layer initialized");
1452 proto_unregister(&hci_sk_proto
);
/* Module teardown: remove the procfs entry, then unregister the socket
 * family and the proto, mirroring hci_sock_init() in reverse order.
 */
1456 void hci_sock_cleanup(void)
1458 bt_procfs_cleanup(&init_net
, "hci");
1459 bt_sock_unregister(BTPROTO_HCI
);
1460 proto_unregister(&hci_sk_proto
);