/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI sockets. */
#include <linux/export.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>
34 static atomic_t monitor_promisc
= ATOMIC_INIT(0);
36 /* ----- HCI socket interface ----- */
39 #define hci_pi(sk) ((struct hci_pinfo *) sk)
44 struct hci_filter filter
;
46 unsigned short channel
;
49 static inline int hci_test_bit(int nr
, void *addr
)
51 return *((__u32
*) addr
+ (nr
>> 5)) & ((__u32
) 1 << (nr
& 31));
55 #define HCI_SFLT_MAX_OGF 5
57 struct hci_sec_filter
{
60 __u32 ocf_mask
[HCI_SFLT_MAX_OGF
+ 1][4];
63 static const struct hci_sec_filter hci_sec_filter
= {
67 { 0x1000d9fe, 0x0000b00c },
72 { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
74 { 0x00005200, 0x00000000, 0x00000000, 0x00 },
76 { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
78 { 0x000002be, 0x00000000, 0x00000000, 0x00 },
79 /* OGF_STATUS_PARAM */
80 { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
84 static struct bt_sock_list hci_sk_list
= {
85 .lock
= __RW_LOCK_UNLOCKED(hci_sk_list
.lock
)
88 static bool is_filtered_packet(struct sock
*sk
, struct sk_buff
*skb
)
90 struct hci_filter
*flt
;
91 int flt_type
, flt_event
;
94 flt
= &hci_pi(sk
)->filter
;
96 if (bt_cb(skb
)->pkt_type
== HCI_VENDOR_PKT
)
99 flt_type
= bt_cb(skb
)->pkt_type
& HCI_FLT_TYPE_BITS
;
101 if (!test_bit(flt_type
, &flt
->type_mask
))
104 /* Extra filter for event packets only */
105 if (bt_cb(skb
)->pkt_type
!= HCI_EVENT_PKT
)
108 flt_event
= (*(__u8
*)skb
->data
& HCI_FLT_EVENT_BITS
);
110 if (!hci_test_bit(flt_event
, &flt
->event_mask
))
113 /* Check filter only when opcode is set */
117 if (flt_event
== HCI_EV_CMD_COMPLETE
&&
118 flt
->opcode
!= get_unaligned((__le16
*)(skb
->data
+ 3)))
121 if (flt_event
== HCI_EV_CMD_STATUS
&&
122 flt
->opcode
!= get_unaligned((__le16
*)(skb
->data
+ 4)))
128 /* Send frame to RAW socket */
129 void hci_send_to_sock(struct hci_dev
*hdev
, struct sk_buff
*skb
)
132 struct sk_buff
*skb_copy
= NULL
;
134 BT_DBG("hdev %p len %d", hdev
, skb
->len
);
136 read_lock(&hci_sk_list
.lock
);
138 sk_for_each(sk
, &hci_sk_list
.head
) {
139 struct sk_buff
*nskb
;
141 if (sk
->sk_state
!= BT_BOUND
|| hci_pi(sk
)->hdev
!= hdev
)
144 /* Don't send frame to the socket it came from */
148 if (hci_pi(sk
)->channel
== HCI_CHANNEL_RAW
) {
149 if (is_filtered_packet(sk
, skb
))
151 } else if (hci_pi(sk
)->channel
== HCI_CHANNEL_USER
) {
152 if (!bt_cb(skb
)->incoming
)
154 if (bt_cb(skb
)->pkt_type
!= HCI_EVENT_PKT
&&
155 bt_cb(skb
)->pkt_type
!= HCI_ACLDATA_PKT
&&
156 bt_cb(skb
)->pkt_type
!= HCI_SCODATA_PKT
)
159 /* Don't send frame to other channel types */
164 /* Create a private copy with headroom */
165 skb_copy
= __pskb_copy_fclone(skb
, 1, GFP_ATOMIC
, true);
169 /* Put type byte before the data */
170 memcpy(skb_push(skb_copy
, 1), &bt_cb(skb
)->pkt_type
, 1);
173 nskb
= skb_clone(skb_copy
, GFP_ATOMIC
);
177 if (sock_queue_rcv_skb(sk
, nskb
))
181 read_unlock(&hci_sk_list
.lock
);
186 /* Send frame to control socket */
187 void hci_send_to_control(struct sk_buff
*skb
, struct sock
*skip_sk
)
191 BT_DBG("len %d", skb
->len
);
193 read_lock(&hci_sk_list
.lock
);
195 sk_for_each(sk
, &hci_sk_list
.head
) {
196 struct sk_buff
*nskb
;
198 /* Skip the original socket */
202 if (sk
->sk_state
!= BT_BOUND
)
205 if (hci_pi(sk
)->channel
!= HCI_CHANNEL_CONTROL
)
208 nskb
= skb_clone(skb
, GFP_ATOMIC
);
212 if (sock_queue_rcv_skb(sk
, nskb
))
216 read_unlock(&hci_sk_list
.lock
);
219 /* Send frame to monitor socket */
220 void hci_send_to_monitor(struct hci_dev
*hdev
, struct sk_buff
*skb
)
223 struct sk_buff
*skb_copy
= NULL
;
224 struct hci_mon_hdr
*hdr
;
227 if (!atomic_read(&monitor_promisc
))
230 BT_DBG("hdev %p len %d", hdev
, skb
->len
);
232 switch (bt_cb(skb
)->pkt_type
) {
233 case HCI_COMMAND_PKT
:
234 opcode
= cpu_to_le16(HCI_MON_COMMAND_PKT
);
237 opcode
= cpu_to_le16(HCI_MON_EVENT_PKT
);
239 case HCI_ACLDATA_PKT
:
240 if (bt_cb(skb
)->incoming
)
241 opcode
= cpu_to_le16(HCI_MON_ACL_RX_PKT
);
243 opcode
= cpu_to_le16(HCI_MON_ACL_TX_PKT
);
245 case HCI_SCODATA_PKT
:
246 if (bt_cb(skb
)->incoming
)
247 opcode
= cpu_to_le16(HCI_MON_SCO_RX_PKT
);
249 opcode
= cpu_to_le16(HCI_MON_SCO_TX_PKT
);
255 /* Create a private copy with headroom */
256 skb_copy
= __pskb_copy_fclone(skb
, HCI_MON_HDR_SIZE
, GFP_ATOMIC
, true);
260 /* Put header before the data */
261 hdr
= (void *) skb_push(skb_copy
, HCI_MON_HDR_SIZE
);
262 hdr
->opcode
= opcode
;
263 hdr
->index
= cpu_to_le16(hdev
->id
);
264 hdr
->len
= cpu_to_le16(skb
->len
);
266 read_lock(&hci_sk_list
.lock
);
268 sk_for_each(sk
, &hci_sk_list
.head
) {
269 struct sk_buff
*nskb
;
271 if (sk
->sk_state
!= BT_BOUND
)
274 if (hci_pi(sk
)->channel
!= HCI_CHANNEL_MONITOR
)
277 nskb
= skb_clone(skb_copy
, GFP_ATOMIC
);
281 if (sock_queue_rcv_skb(sk
, nskb
))
285 read_unlock(&hci_sk_list
.lock
);
290 static void send_monitor_event(struct sk_buff
*skb
)
294 BT_DBG("len %d", skb
->len
);
296 read_lock(&hci_sk_list
.lock
);
298 sk_for_each(sk
, &hci_sk_list
.head
) {
299 struct sk_buff
*nskb
;
301 if (sk
->sk_state
!= BT_BOUND
)
304 if (hci_pi(sk
)->channel
!= HCI_CHANNEL_MONITOR
)
307 nskb
= skb_clone(skb
, GFP_ATOMIC
);
311 if (sock_queue_rcv_skb(sk
, nskb
))
315 read_unlock(&hci_sk_list
.lock
);
318 static struct sk_buff
*create_monitor_event(struct hci_dev
*hdev
, int event
)
320 struct hci_mon_hdr
*hdr
;
321 struct hci_mon_new_index
*ni
;
327 skb
= bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE
, GFP_ATOMIC
);
331 ni
= (void *) skb_put(skb
, HCI_MON_NEW_INDEX_SIZE
);
332 ni
->type
= hdev
->dev_type
;
334 bacpy(&ni
->bdaddr
, &hdev
->bdaddr
);
335 memcpy(ni
->name
, hdev
->name
, 8);
337 opcode
= cpu_to_le16(HCI_MON_NEW_INDEX
);
341 skb
= bt_skb_alloc(0, GFP_ATOMIC
);
345 opcode
= cpu_to_le16(HCI_MON_DEL_INDEX
);
352 __net_timestamp(skb
);
354 hdr
= (void *) skb_push(skb
, HCI_MON_HDR_SIZE
);
355 hdr
->opcode
= opcode
;
356 hdr
->index
= cpu_to_le16(hdev
->id
);
357 hdr
->len
= cpu_to_le16(skb
->len
- HCI_MON_HDR_SIZE
);
362 static void send_monitor_replay(struct sock
*sk
)
364 struct hci_dev
*hdev
;
366 read_lock(&hci_dev_list_lock
);
368 list_for_each_entry(hdev
, &hci_dev_list
, list
) {
371 skb
= create_monitor_event(hdev
, HCI_DEV_REG
);
375 if (sock_queue_rcv_skb(sk
, skb
))
379 read_unlock(&hci_dev_list_lock
);
382 /* Generate internal stack event */
383 static void hci_si_event(struct hci_dev
*hdev
, int type
, int dlen
, void *data
)
385 struct hci_event_hdr
*hdr
;
386 struct hci_ev_stack_internal
*ev
;
389 skb
= bt_skb_alloc(HCI_EVENT_HDR_SIZE
+ sizeof(*ev
) + dlen
, GFP_ATOMIC
);
393 hdr
= (void *) skb_put(skb
, HCI_EVENT_HDR_SIZE
);
394 hdr
->evt
= HCI_EV_STACK_INTERNAL
;
395 hdr
->plen
= sizeof(*ev
) + dlen
;
397 ev
= (void *) skb_put(skb
, sizeof(*ev
) + dlen
);
399 memcpy(ev
->data
, data
, dlen
);
401 bt_cb(skb
)->incoming
= 1;
402 __net_timestamp(skb
);
404 bt_cb(skb
)->pkt_type
= HCI_EVENT_PKT
;
405 hci_send_to_sock(hdev
, skb
);
409 void hci_sock_dev_event(struct hci_dev
*hdev
, int event
)
411 struct hci_ev_si_device ev
;
413 BT_DBG("hdev %s event %d", hdev
->name
, event
);
415 /* Send event to monitor */
416 if (atomic_read(&monitor_promisc
)) {
419 skb
= create_monitor_event(hdev
, event
);
421 send_monitor_event(skb
);
426 /* Send event to sockets */
428 ev
.dev_id
= hdev
->id
;
429 hci_si_event(NULL
, HCI_EV_SI_DEVICE
, sizeof(ev
), &ev
);
431 if (event
== HCI_DEV_UNREG
) {
434 /* Detach sockets from device */
435 read_lock(&hci_sk_list
.lock
);
436 sk_for_each(sk
, &hci_sk_list
.head
) {
437 bh_lock_sock_nested(sk
);
438 if (hci_pi(sk
)->hdev
== hdev
) {
439 hci_pi(sk
)->hdev
= NULL
;
441 sk
->sk_state
= BT_OPEN
;
442 sk
->sk_state_change(sk
);
448 read_unlock(&hci_sk_list
.lock
);
452 static int hci_sock_release(struct socket
*sock
)
454 struct sock
*sk
= sock
->sk
;
455 struct hci_dev
*hdev
;
457 BT_DBG("sock %p sk %p", sock
, sk
);
462 hdev
= hci_pi(sk
)->hdev
;
464 if (hci_pi(sk
)->channel
== HCI_CHANNEL_MONITOR
)
465 atomic_dec(&monitor_promisc
);
467 bt_sock_unlink(&hci_sk_list
, sk
);
470 if (hci_pi(sk
)->channel
== HCI_CHANNEL_USER
) {
471 mgmt_index_added(hdev
);
472 clear_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
);
473 hci_dev_close(hdev
->id
);
476 atomic_dec(&hdev
->promisc
);
482 skb_queue_purge(&sk
->sk_receive_queue
);
483 skb_queue_purge(&sk
->sk_write_queue
);
489 static int hci_sock_blacklist_add(struct hci_dev
*hdev
, void __user
*arg
)
494 if (copy_from_user(&bdaddr
, arg
, sizeof(bdaddr
)))
499 err
= hci_bdaddr_list_add(&hdev
->blacklist
, &bdaddr
, BDADDR_BREDR
);
501 hci_dev_unlock(hdev
);
506 static int hci_sock_blacklist_del(struct hci_dev
*hdev
, void __user
*arg
)
511 if (copy_from_user(&bdaddr
, arg
, sizeof(bdaddr
)))
516 err
= hci_bdaddr_list_del(&hdev
->blacklist
, &bdaddr
, BDADDR_BREDR
);
518 hci_dev_unlock(hdev
);
523 /* Ioctls that require bound socket */
524 static int hci_sock_bound_ioctl(struct sock
*sk
, unsigned int cmd
,
527 struct hci_dev
*hdev
= hci_pi(sk
)->hdev
;
532 if (test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
))
535 if (test_bit(HCI_UNCONFIGURED
, &hdev
->dev_flags
))
538 if (hdev
->dev_type
!= HCI_BREDR
)
543 if (!capable(CAP_NET_ADMIN
))
548 return hci_get_conn_info(hdev
, (void __user
*) arg
);
551 return hci_get_auth_info(hdev
, (void __user
*) arg
);
554 if (!capable(CAP_NET_ADMIN
))
556 return hci_sock_blacklist_add(hdev
, (void __user
*) arg
);
559 if (!capable(CAP_NET_ADMIN
))
561 return hci_sock_blacklist_del(hdev
, (void __user
*) arg
);
567 static int hci_sock_ioctl(struct socket
*sock
, unsigned int cmd
,
570 void __user
*argp
= (void __user
*) arg
;
571 struct sock
*sk
= sock
->sk
;
574 BT_DBG("cmd %x arg %lx", cmd
, arg
);
578 if (hci_pi(sk
)->channel
!= HCI_CHANNEL_RAW
) {
587 return hci_get_dev_list(argp
);
590 return hci_get_dev_info(argp
);
593 return hci_get_conn_list(argp
);
596 if (!capable(CAP_NET_ADMIN
))
598 return hci_dev_open(arg
);
601 if (!capable(CAP_NET_ADMIN
))
603 return hci_dev_close(arg
);
606 if (!capable(CAP_NET_ADMIN
))
608 return hci_dev_reset(arg
);
611 if (!capable(CAP_NET_ADMIN
))
613 return hci_dev_reset_stat(arg
);
623 if (!capable(CAP_NET_ADMIN
))
625 return hci_dev_cmd(cmd
, argp
);
628 return hci_inquiry(argp
);
633 err
= hci_sock_bound_ioctl(sk
, cmd
, arg
);
640 static int hci_sock_bind(struct socket
*sock
, struct sockaddr
*addr
,
643 struct sockaddr_hci haddr
;
644 struct sock
*sk
= sock
->sk
;
645 struct hci_dev
*hdev
= NULL
;
648 BT_DBG("sock %p sk %p", sock
, sk
);
653 memset(&haddr
, 0, sizeof(haddr
));
654 len
= min_t(unsigned int, sizeof(haddr
), addr_len
);
655 memcpy(&haddr
, addr
, len
);
657 if (haddr
.hci_family
!= AF_BLUETOOTH
)
662 if (sk
->sk_state
== BT_BOUND
) {
667 switch (haddr
.hci_channel
) {
668 case HCI_CHANNEL_RAW
:
669 if (hci_pi(sk
)->hdev
) {
674 if (haddr
.hci_dev
!= HCI_DEV_NONE
) {
675 hdev
= hci_dev_get(haddr
.hci_dev
);
681 atomic_inc(&hdev
->promisc
);
684 hci_pi(sk
)->hdev
= hdev
;
687 case HCI_CHANNEL_USER
:
688 if (hci_pi(sk
)->hdev
) {
693 if (haddr
.hci_dev
== HCI_DEV_NONE
) {
698 if (!capable(CAP_NET_ADMIN
)) {
703 hdev
= hci_dev_get(haddr
.hci_dev
);
709 if (test_bit(HCI_UP
, &hdev
->flags
) ||
710 test_bit(HCI_INIT
, &hdev
->flags
) ||
711 test_bit(HCI_SETUP
, &hdev
->dev_flags
) ||
712 test_bit(HCI_CONFIG
, &hdev
->dev_flags
)) {
718 if (test_and_set_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
)) {
724 mgmt_index_removed(hdev
);
726 err
= hci_dev_open(hdev
->id
);
728 clear_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
);
729 mgmt_index_added(hdev
);
734 atomic_inc(&hdev
->promisc
);
736 hci_pi(sk
)->hdev
= hdev
;
739 case HCI_CHANNEL_CONTROL
:
740 if (haddr
.hci_dev
!= HCI_DEV_NONE
) {
745 if (!capable(CAP_NET_ADMIN
)) {
752 case HCI_CHANNEL_MONITOR
:
753 if (haddr
.hci_dev
!= HCI_DEV_NONE
) {
758 if (!capable(CAP_NET_RAW
)) {
763 send_monitor_replay(sk
);
765 atomic_inc(&monitor_promisc
);
774 hci_pi(sk
)->channel
= haddr
.hci_channel
;
775 sk
->sk_state
= BT_BOUND
;
782 static int hci_sock_getname(struct socket
*sock
, struct sockaddr
*addr
,
783 int *addr_len
, int peer
)
785 struct sockaddr_hci
*haddr
= (struct sockaddr_hci
*) addr
;
786 struct sock
*sk
= sock
->sk
;
787 struct hci_dev
*hdev
;
790 BT_DBG("sock %p sk %p", sock
, sk
);
797 hdev
= hci_pi(sk
)->hdev
;
803 *addr_len
= sizeof(*haddr
);
804 haddr
->hci_family
= AF_BLUETOOTH
;
805 haddr
->hci_dev
= hdev
->id
;
806 haddr
->hci_channel
= hci_pi(sk
)->channel
;
813 static void hci_sock_cmsg(struct sock
*sk
, struct msghdr
*msg
,
816 __u32 mask
= hci_pi(sk
)->cmsg_mask
;
818 if (mask
& HCI_CMSG_DIR
) {
819 int incoming
= bt_cb(skb
)->incoming
;
820 put_cmsg(msg
, SOL_HCI
, HCI_CMSG_DIR
, sizeof(incoming
),
824 if (mask
& HCI_CMSG_TSTAMP
) {
826 struct compat_timeval ctv
;
832 skb_get_timestamp(skb
, &tv
);
837 if (!COMPAT_USE_64BIT_TIME
&&
838 (msg
->msg_flags
& MSG_CMSG_COMPAT
)) {
839 ctv
.tv_sec
= tv
.tv_sec
;
840 ctv
.tv_usec
= tv
.tv_usec
;
846 put_cmsg(msg
, SOL_HCI
, HCI_CMSG_TSTAMP
, len
, data
);
850 static int hci_sock_recvmsg(struct kiocb
*iocb
, struct socket
*sock
,
851 struct msghdr
*msg
, size_t len
, int flags
)
853 int noblock
= flags
& MSG_DONTWAIT
;
854 struct sock
*sk
= sock
->sk
;
858 BT_DBG("sock %p, sk %p", sock
, sk
);
860 if (flags
& (MSG_OOB
))
863 if (sk
->sk_state
== BT_CLOSED
)
866 skb
= skb_recv_datagram(sk
, flags
, noblock
, &err
);
872 msg
->msg_flags
|= MSG_TRUNC
;
876 skb_reset_transport_header(skb
);
877 err
= skb_copy_datagram_msg(skb
, 0, msg
, copied
);
879 switch (hci_pi(sk
)->channel
) {
880 case HCI_CHANNEL_RAW
:
881 hci_sock_cmsg(sk
, msg
, skb
);
883 case HCI_CHANNEL_USER
:
884 case HCI_CHANNEL_CONTROL
:
885 case HCI_CHANNEL_MONITOR
:
886 sock_recv_timestamp(msg
, sk
, skb
);
890 skb_free_datagram(sk
, skb
);
892 return err
? : copied
;
895 static int hci_sock_sendmsg(struct kiocb
*iocb
, struct socket
*sock
,
896 struct msghdr
*msg
, size_t len
)
898 struct sock
*sk
= sock
->sk
;
899 struct hci_dev
*hdev
;
903 BT_DBG("sock %p sk %p", sock
, sk
);
905 if (msg
->msg_flags
& MSG_OOB
)
908 if (msg
->msg_flags
& ~(MSG_DONTWAIT
|MSG_NOSIGNAL
|MSG_ERRQUEUE
))
911 if (len
< 4 || len
> HCI_MAX_FRAME_SIZE
)
916 switch (hci_pi(sk
)->channel
) {
917 case HCI_CHANNEL_RAW
:
918 case HCI_CHANNEL_USER
:
920 case HCI_CHANNEL_CONTROL
:
921 err
= mgmt_control(sk
, msg
, len
);
923 case HCI_CHANNEL_MONITOR
:
931 hdev
= hci_pi(sk
)->hdev
;
937 if (!test_bit(HCI_UP
, &hdev
->flags
)) {
942 skb
= bt_skb_send_alloc(sk
, len
, msg
->msg_flags
& MSG_DONTWAIT
, &err
);
946 if (memcpy_from_msg(skb_put(skb
, len
), msg
, len
)) {
951 bt_cb(skb
)->pkt_type
= *((unsigned char *) skb
->data
);
954 if (hci_pi(sk
)->channel
== HCI_CHANNEL_USER
) {
955 /* No permission check is needed for user channel
956 * since that gets enforced when binding the socket.
958 * However check that the packet type is valid.
960 if (bt_cb(skb
)->pkt_type
!= HCI_COMMAND_PKT
&&
961 bt_cb(skb
)->pkt_type
!= HCI_ACLDATA_PKT
&&
962 bt_cb(skb
)->pkt_type
!= HCI_SCODATA_PKT
) {
967 skb_queue_tail(&hdev
->raw_q
, skb
);
968 queue_work(hdev
->workqueue
, &hdev
->tx_work
);
969 } else if (bt_cb(skb
)->pkt_type
== HCI_COMMAND_PKT
) {
970 u16 opcode
= get_unaligned_le16(skb
->data
);
971 u16 ogf
= hci_opcode_ogf(opcode
);
972 u16 ocf
= hci_opcode_ocf(opcode
);
974 if (((ogf
> HCI_SFLT_MAX_OGF
) ||
975 !hci_test_bit(ocf
& HCI_FLT_OCF_BITS
,
976 &hci_sec_filter
.ocf_mask
[ogf
])) &&
977 !capable(CAP_NET_RAW
)) {
983 skb_queue_tail(&hdev
->raw_q
, skb
);
984 queue_work(hdev
->workqueue
, &hdev
->tx_work
);
986 /* Stand-alone HCI commands must be flagged as
987 * single-command requests.
989 bt_cb(skb
)->req
.start
= true;
991 skb_queue_tail(&hdev
->cmd_q
, skb
);
992 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
995 if (!capable(CAP_NET_RAW
)) {
1000 skb_queue_tail(&hdev
->raw_q
, skb
);
1001 queue_work(hdev
->workqueue
, &hdev
->tx_work
);
1015 static int hci_sock_setsockopt(struct socket
*sock
, int level
, int optname
,
1016 char __user
*optval
, unsigned int len
)
1018 struct hci_ufilter uf
= { .opcode
= 0 };
1019 struct sock
*sk
= sock
->sk
;
1020 int err
= 0, opt
= 0;
1022 BT_DBG("sk %p, opt %d", sk
, optname
);
1026 if (hci_pi(sk
)->channel
!= HCI_CHANNEL_RAW
) {
1033 if (get_user(opt
, (int __user
*)optval
)) {
1039 hci_pi(sk
)->cmsg_mask
|= HCI_CMSG_DIR
;
1041 hci_pi(sk
)->cmsg_mask
&= ~HCI_CMSG_DIR
;
1044 case HCI_TIME_STAMP
:
1045 if (get_user(opt
, (int __user
*)optval
)) {
1051 hci_pi(sk
)->cmsg_mask
|= HCI_CMSG_TSTAMP
;
1053 hci_pi(sk
)->cmsg_mask
&= ~HCI_CMSG_TSTAMP
;
1058 struct hci_filter
*f
= &hci_pi(sk
)->filter
;
1060 uf
.type_mask
= f
->type_mask
;
1061 uf
.opcode
= f
->opcode
;
1062 uf
.event_mask
[0] = *((u32
*) f
->event_mask
+ 0);
1063 uf
.event_mask
[1] = *((u32
*) f
->event_mask
+ 1);
1066 len
= min_t(unsigned int, len
, sizeof(uf
));
1067 if (copy_from_user(&uf
, optval
, len
)) {
1072 if (!capable(CAP_NET_RAW
)) {
1073 uf
.type_mask
&= hci_sec_filter
.type_mask
;
1074 uf
.event_mask
[0] &= *((u32
*) hci_sec_filter
.event_mask
+ 0);
1075 uf
.event_mask
[1] &= *((u32
*) hci_sec_filter
.event_mask
+ 1);
1079 struct hci_filter
*f
= &hci_pi(sk
)->filter
;
1081 f
->type_mask
= uf
.type_mask
;
1082 f
->opcode
= uf
.opcode
;
1083 *((u32
*) f
->event_mask
+ 0) = uf
.event_mask
[0];
1084 *((u32
*) f
->event_mask
+ 1) = uf
.event_mask
[1];
1098 static int hci_sock_getsockopt(struct socket
*sock
, int level
, int optname
,
1099 char __user
*optval
, int __user
*optlen
)
1101 struct hci_ufilter uf
;
1102 struct sock
*sk
= sock
->sk
;
1103 int len
, opt
, err
= 0;
1105 BT_DBG("sk %p, opt %d", sk
, optname
);
1107 if (get_user(len
, optlen
))
1112 if (hci_pi(sk
)->channel
!= HCI_CHANNEL_RAW
) {
1119 if (hci_pi(sk
)->cmsg_mask
& HCI_CMSG_DIR
)
1124 if (put_user(opt
, optval
))
1128 case HCI_TIME_STAMP
:
1129 if (hci_pi(sk
)->cmsg_mask
& HCI_CMSG_TSTAMP
)
1134 if (put_user(opt
, optval
))
1140 struct hci_filter
*f
= &hci_pi(sk
)->filter
;
1142 memset(&uf
, 0, sizeof(uf
));
1143 uf
.type_mask
= f
->type_mask
;
1144 uf
.opcode
= f
->opcode
;
1145 uf
.event_mask
[0] = *((u32
*) f
->event_mask
+ 0);
1146 uf
.event_mask
[1] = *((u32
*) f
->event_mask
+ 1);
1149 len
= min_t(unsigned int, len
, sizeof(uf
));
1150 if (copy_to_user(optval
, &uf
, len
))
1164 static const struct proto_ops hci_sock_ops
= {
1165 .family
= PF_BLUETOOTH
,
1166 .owner
= THIS_MODULE
,
1167 .release
= hci_sock_release
,
1168 .bind
= hci_sock_bind
,
1169 .getname
= hci_sock_getname
,
1170 .sendmsg
= hci_sock_sendmsg
,
1171 .recvmsg
= hci_sock_recvmsg
,
1172 .ioctl
= hci_sock_ioctl
,
1173 .poll
= datagram_poll
,
1174 .listen
= sock_no_listen
,
1175 .shutdown
= sock_no_shutdown
,
1176 .setsockopt
= hci_sock_setsockopt
,
1177 .getsockopt
= hci_sock_getsockopt
,
1178 .connect
= sock_no_connect
,
1179 .socketpair
= sock_no_socketpair
,
1180 .accept
= sock_no_accept
,
1181 .mmap
= sock_no_mmap
1184 static struct proto hci_sk_proto
= {
1186 .owner
= THIS_MODULE
,
1187 .obj_size
= sizeof(struct hci_pinfo
)
1190 static int hci_sock_create(struct net
*net
, struct socket
*sock
, int protocol
,
1195 BT_DBG("sock %p", sock
);
1197 if (sock
->type
!= SOCK_RAW
)
1198 return -ESOCKTNOSUPPORT
;
1200 sock
->ops
= &hci_sock_ops
;
1202 sk
= sk_alloc(net
, PF_BLUETOOTH
, GFP_ATOMIC
, &hci_sk_proto
);
1206 sock_init_data(sock
, sk
);
1208 sock_reset_flag(sk
, SOCK_ZAPPED
);
1210 sk
->sk_protocol
= protocol
;
1212 sock
->state
= SS_UNCONNECTED
;
1213 sk
->sk_state
= BT_OPEN
;
1215 bt_sock_link(&hci_sk_list
, sk
);
1219 static const struct net_proto_family hci_sock_family_ops
= {
1220 .family
= PF_BLUETOOTH
,
1221 .owner
= THIS_MODULE
,
1222 .create
= hci_sock_create
,
1225 int __init
hci_sock_init(void)
1229 BUILD_BUG_ON(sizeof(struct sockaddr_hci
) > sizeof(struct sockaddr
));
1231 err
= proto_register(&hci_sk_proto
, 0);
1235 err
= bt_sock_register(BTPROTO_HCI
, &hci_sock_family_ops
);
1237 BT_ERR("HCI socket registration failed");
1241 err
= bt_procfs_init(&init_net
, "hci", &hci_sk_list
, NULL
);
1243 BT_ERR("Failed to create HCI proc file");
1244 bt_sock_unregister(BTPROTO_HCI
);
1248 BT_INFO("HCI socket layer initialized");
1253 proto_unregister(&hci_sk_proto
);
1257 void hci_sock_cleanup(void)
1259 bt_procfs_cleanup(&init_net
, "hci");
1260 bt_sock_unregister(BTPROTO_HCI
);
1261 proto_unregister(&hci_sk_proto
);