/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
26 /* Bluetooth HCI core. */
28 #include <linux/jiffies.h>
29 #include <linux/module.h>
30 #include <linux/kmod.h>
32 #include <linux/types.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/skbuff.h>
41 #include <linux/workqueue.h>
42 #include <linux/interrupt.h>
43 #include <linux/notifier.h>
44 #include <linux/rfkill.h>
45 #include <linux/timer.h>
46 #include <linux/crypto.h>
49 #include <asm/system.h>
50 #include <linux/uaccess.h>
51 #include <asm/unaligned.h>
53 #include <net/bluetooth/bluetooth.h>
54 #include <net/bluetooth/hci_core.h>
56 #define AUTO_OFF_TIMEOUT 2000
/* Work handlers for the RX, command and TX paths (defined below). */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);
/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
75 /* ---- HCI notifications ---- */
77 int hci_register_notifier(struct notifier_block
*nb
)
79 return atomic_notifier_chain_register(&hci_notifier
, nb
);
82 int hci_unregister_notifier(struct notifier_block
*nb
)
84 return atomic_notifier_chain_unregister(&hci_notifier
, nb
);
87 static void hci_notify(struct hci_dev
*hdev
, int event
)
89 atomic_notifier_call_chain(&hci_notifier
, event
, hdev
);
92 /* ---- HCI requests ---- */
94 void hci_req_complete(struct hci_dev
*hdev
, __u16 cmd
, int result
)
96 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev
->name
, cmd
, result
);
98 /* If this is the init phase check if the completed command matches
99 * the last init command, and if not just return.
101 if (test_bit(HCI_INIT
, &hdev
->flags
) && hdev
->init_last_cmd
!= cmd
)
104 if (hdev
->req_status
== HCI_REQ_PEND
) {
105 hdev
->req_result
= result
;
106 hdev
->req_status
= HCI_REQ_DONE
;
107 wake_up_interruptible(&hdev
->req_wait_q
);
111 static void hci_req_cancel(struct hci_dev
*hdev
, int err
)
113 BT_DBG("%s err 0x%2.2x", hdev
->name
, err
);
115 if (hdev
->req_status
== HCI_REQ_PEND
) {
116 hdev
->req_result
= err
;
117 hdev
->req_status
= HCI_REQ_CANCELED
;
118 wake_up_interruptible(&hdev
->req_wait_q
);
122 /* Execute request and wait for completion. */
123 static int __hci_request(struct hci_dev
*hdev
, void (*req
)(struct hci_dev
*hdev
, unsigned long opt
),
124 unsigned long opt
, __u32 timeout
)
126 DECLARE_WAITQUEUE(wait
, current
);
129 BT_DBG("%s start", hdev
->name
);
131 hdev
->req_status
= HCI_REQ_PEND
;
133 add_wait_queue(&hdev
->req_wait_q
, &wait
);
134 set_current_state(TASK_INTERRUPTIBLE
);
137 schedule_timeout(timeout
);
139 remove_wait_queue(&hdev
->req_wait_q
, &wait
);
141 if (signal_pending(current
))
144 switch (hdev
->req_status
) {
146 err
= -bt_to_errno(hdev
->req_result
);
149 case HCI_REQ_CANCELED
:
150 err
= -hdev
->req_result
;
158 hdev
->req_status
= hdev
->req_result
= 0;
160 BT_DBG("%s end: err %d", hdev
->name
, err
);
165 static inline int hci_request(struct hci_dev
*hdev
, void (*req
)(struct hci_dev
*hdev
, unsigned long opt
),
166 unsigned long opt
, __u32 timeout
)
170 if (!test_bit(HCI_UP
, &hdev
->flags
))
173 /* Serialize all requests */
175 ret
= __hci_request(hdev
, req
, opt
, timeout
);
176 hci_req_unlock(hdev
);
181 static void hci_reset_req(struct hci_dev
*hdev
, unsigned long opt
)
183 BT_DBG("%s %ld", hdev
->name
, opt
);
186 set_bit(HCI_RESET
, &hdev
->flags
);
187 hci_send_cmd(hdev
, HCI_OP_RESET
, 0, NULL
);
190 static void bredr_init(struct hci_dev
*hdev
)
192 struct hci_cp_delete_stored_link_key cp
;
196 hdev
->flow_ctl_mode
= HCI_FLOW_CTL_MODE_PACKET_BASED
;
198 /* Mandatory initialization */
201 if (!test_bit(HCI_QUIRK_NO_RESET
, &hdev
->quirks
)) {
202 set_bit(HCI_RESET
, &hdev
->flags
);
203 hci_send_cmd(hdev
, HCI_OP_RESET
, 0, NULL
);
206 /* Read Local Supported Features */
207 hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_FEATURES
, 0, NULL
);
209 /* Read Local Version */
210 hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_VERSION
, 0, NULL
);
212 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
213 hci_send_cmd(hdev
, HCI_OP_READ_BUFFER_SIZE
, 0, NULL
);
215 /* Read BD Address */
216 hci_send_cmd(hdev
, HCI_OP_READ_BD_ADDR
, 0, NULL
);
218 /* Read Class of Device */
219 hci_send_cmd(hdev
, HCI_OP_READ_CLASS_OF_DEV
, 0, NULL
);
221 /* Read Local Name */
222 hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_NAME
, 0, NULL
);
224 /* Read Voice Setting */
225 hci_send_cmd(hdev
, HCI_OP_READ_VOICE_SETTING
, 0, NULL
);
227 /* Optional initialization */
229 /* Clear Event Filters */
230 flt_type
= HCI_FLT_CLEAR_ALL
;
231 hci_send_cmd(hdev
, HCI_OP_SET_EVENT_FLT
, 1, &flt_type
);
233 /* Connection accept timeout ~20 secs */
234 param
= cpu_to_le16(0x7d00);
235 hci_send_cmd(hdev
, HCI_OP_WRITE_CA_TIMEOUT
, 2, ¶m
);
237 bacpy(&cp
.bdaddr
, BDADDR_ANY
);
239 hci_send_cmd(hdev
, HCI_OP_DELETE_STORED_LINK_KEY
, sizeof(cp
), &cp
);
242 static void amp_init(struct hci_dev
*hdev
)
244 hdev
->flow_ctl_mode
= HCI_FLOW_CTL_MODE_BLOCK_BASED
;
247 hci_send_cmd(hdev
, HCI_OP_RESET
, 0, NULL
);
249 /* Read Local Version */
250 hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_VERSION
, 0, NULL
);
253 static void hci_init_req(struct hci_dev
*hdev
, unsigned long opt
)
257 BT_DBG("%s %ld", hdev
->name
, opt
);
259 /* Driver initialization */
261 /* Special commands */
262 while ((skb
= skb_dequeue(&hdev
->driver_init
))) {
263 bt_cb(skb
)->pkt_type
= HCI_COMMAND_PKT
;
264 skb
->dev
= (void *) hdev
;
266 skb_queue_tail(&hdev
->cmd_q
, skb
);
267 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
269 skb_queue_purge(&hdev
->driver_init
);
271 switch (hdev
->dev_type
) {
281 BT_ERR("Unknown device type %d", hdev
->dev_type
);
287 static void hci_le_init_req(struct hci_dev
*hdev
, unsigned long opt
)
289 BT_DBG("%s", hdev
->name
);
291 /* Read LE buffer size */
292 hci_send_cmd(hdev
, HCI_OP_LE_READ_BUFFER_SIZE
, 0, NULL
);
295 static void hci_scan_req(struct hci_dev
*hdev
, unsigned long opt
)
299 BT_DBG("%s %x", hdev
->name
, scan
);
301 /* Inquiry and Page scans */
302 hci_send_cmd(hdev
, HCI_OP_WRITE_SCAN_ENABLE
, 1, &scan
);
305 static void hci_auth_req(struct hci_dev
*hdev
, unsigned long opt
)
309 BT_DBG("%s %x", hdev
->name
, auth
);
312 hci_send_cmd(hdev
, HCI_OP_WRITE_AUTH_ENABLE
, 1, &auth
);
315 static void hci_encrypt_req(struct hci_dev
*hdev
, unsigned long opt
)
319 BT_DBG("%s %x", hdev
->name
, encrypt
);
322 hci_send_cmd(hdev
, HCI_OP_WRITE_ENCRYPT_MODE
, 1, &encrypt
);
325 static void hci_linkpol_req(struct hci_dev
*hdev
, unsigned long opt
)
327 __le16 policy
= cpu_to_le16(opt
);
329 BT_DBG("%s %x", hdev
->name
, policy
);
331 /* Default link policy */
332 hci_send_cmd(hdev
, HCI_OP_WRITE_DEF_LINK_POLICY
, 2, &policy
);
335 /* Get HCI device by index.
336 * Device is held on return. */
337 struct hci_dev
*hci_dev_get(int index
)
339 struct hci_dev
*hdev
= NULL
, *d
;
346 read_lock(&hci_dev_list_lock
);
347 list_for_each_entry(d
, &hci_dev_list
, list
) {
348 if (d
->id
== index
) {
349 hdev
= hci_dev_hold(d
);
353 read_unlock(&hci_dev_list_lock
);
357 /* ---- Inquiry support ---- */
359 bool hci_discovery_active(struct hci_dev
*hdev
)
361 struct discovery_state
*discov
= &hdev
->discovery
;
363 switch (discov
->state
) {
364 case DISCOVERY_INQUIRY
:
365 case DISCOVERY_LE_SCAN
:
366 case DISCOVERY_RESOLVING
:
374 void hci_discovery_set_state(struct hci_dev
*hdev
, int state
)
376 BT_DBG("%s state %u -> %u", hdev
->name
, hdev
->discovery
.state
, state
);
378 if (hdev
->discovery
.state
== state
)
382 case DISCOVERY_STOPPED
:
383 mgmt_discovering(hdev
, 0);
385 case DISCOVERY_STARTING
:
387 case DISCOVERY_INQUIRY
:
388 case DISCOVERY_LE_SCAN
:
389 mgmt_discovering(hdev
, 1);
391 case DISCOVERY_RESOLVING
:
393 case DISCOVERY_STOPPING
:
397 hdev
->discovery
.state
= state
;
400 static void inquiry_cache_flush(struct hci_dev
*hdev
)
402 struct discovery_state
*cache
= &hdev
->discovery
;
403 struct inquiry_entry
*p
, *n
;
405 list_for_each_entry_safe(p
, n
, &cache
->all
, all
) {
410 INIT_LIST_HEAD(&cache
->unknown
);
411 INIT_LIST_HEAD(&cache
->resolve
);
412 cache
->state
= DISCOVERY_STOPPED
;
415 struct inquiry_entry
*hci_inquiry_cache_lookup(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
417 struct discovery_state
*cache
= &hdev
->discovery
;
418 struct inquiry_entry
*e
;
420 BT_DBG("cache %p, %s", cache
, batostr(bdaddr
));
422 list_for_each_entry(e
, &cache
->all
, all
) {
423 if (!bacmp(&e
->data
.bdaddr
, bdaddr
))
430 struct inquiry_entry
*hci_inquiry_cache_lookup_unknown(struct hci_dev
*hdev
,
433 struct discovery_state
*cache
= &hdev
->discovery
;
434 struct inquiry_entry
*e
;
436 BT_DBG("cache %p, %s", cache
, batostr(bdaddr
));
438 list_for_each_entry(e
, &cache
->unknown
, list
) {
439 if (!bacmp(&e
->data
.bdaddr
, bdaddr
))
446 struct inquiry_entry
*hci_inquiry_cache_lookup_resolve(struct hci_dev
*hdev
,
450 struct discovery_state
*cache
= &hdev
->discovery
;
451 struct inquiry_entry
*e
;
453 BT_DBG("cache %p bdaddr %s state %d", cache
, batostr(bdaddr
), state
);
455 list_for_each_entry(e
, &cache
->resolve
, list
) {
456 if (!bacmp(bdaddr
, BDADDR_ANY
) && e
->name_state
== state
)
458 if (!bacmp(&e
->data
.bdaddr
, bdaddr
))
465 void hci_inquiry_cache_update_resolve(struct hci_dev
*hdev
,
466 struct inquiry_entry
*ie
)
468 struct discovery_state
*cache
= &hdev
->discovery
;
469 struct list_head
*pos
= &cache
->resolve
;
470 struct inquiry_entry
*p
;
474 list_for_each_entry(p
, &cache
->resolve
, list
) {
475 if (p
->name_state
!= NAME_PENDING
&&
476 abs(p
->data
.rssi
) >= abs(ie
->data
.rssi
))
481 list_add(&ie
->list
, pos
);
484 bool hci_inquiry_cache_update(struct hci_dev
*hdev
, struct inquiry_data
*data
,
487 struct discovery_state
*cache
= &hdev
->discovery
;
488 struct inquiry_entry
*ie
;
490 BT_DBG("cache %p, %s", cache
, batostr(&data
->bdaddr
));
492 ie
= hci_inquiry_cache_lookup(hdev
, &data
->bdaddr
);
494 if (ie
->name_state
== NAME_NEEDED
&&
495 data
->rssi
!= ie
->data
.rssi
) {
496 ie
->data
.rssi
= data
->rssi
;
497 hci_inquiry_cache_update_resolve(hdev
, ie
);
503 /* Entry not in the cache. Add new one. */
504 ie
= kzalloc(sizeof(struct inquiry_entry
), GFP_ATOMIC
);
508 list_add(&ie
->all
, &cache
->all
);
511 ie
->name_state
= NAME_KNOWN
;
513 ie
->name_state
= NAME_NOT_KNOWN
;
514 list_add(&ie
->list
, &cache
->unknown
);
518 if (name_known
&& ie
->name_state
!= NAME_KNOWN
&&
519 ie
->name_state
!= NAME_PENDING
) {
520 ie
->name_state
= NAME_KNOWN
;
524 memcpy(&ie
->data
, data
, sizeof(*data
));
525 ie
->timestamp
= jiffies
;
526 cache
->timestamp
= jiffies
;
528 if (ie
->name_state
== NAME_NOT_KNOWN
)
534 static int inquiry_cache_dump(struct hci_dev
*hdev
, int num
, __u8
*buf
)
536 struct discovery_state
*cache
= &hdev
->discovery
;
537 struct inquiry_info
*info
= (struct inquiry_info
*) buf
;
538 struct inquiry_entry
*e
;
541 list_for_each_entry(e
, &cache
->all
, all
) {
542 struct inquiry_data
*data
= &e
->data
;
547 bacpy(&info
->bdaddr
, &data
->bdaddr
);
548 info
->pscan_rep_mode
= data
->pscan_rep_mode
;
549 info
->pscan_period_mode
= data
->pscan_period_mode
;
550 info
->pscan_mode
= data
->pscan_mode
;
551 memcpy(info
->dev_class
, data
->dev_class
, 3);
552 info
->clock_offset
= data
->clock_offset
;
558 BT_DBG("cache %p, copied %d", cache
, copied
);
562 static void hci_inq_req(struct hci_dev
*hdev
, unsigned long opt
)
564 struct hci_inquiry_req
*ir
= (struct hci_inquiry_req
*) opt
;
565 struct hci_cp_inquiry cp
;
567 BT_DBG("%s", hdev
->name
);
569 if (test_bit(HCI_INQUIRY
, &hdev
->flags
))
573 memcpy(&cp
.lap
, &ir
->lap
, 3);
574 cp
.length
= ir
->length
;
575 cp
.num_rsp
= ir
->num_rsp
;
576 hci_send_cmd(hdev
, HCI_OP_INQUIRY
, sizeof(cp
), &cp
);
579 int hci_inquiry(void __user
*arg
)
581 __u8 __user
*ptr
= arg
;
582 struct hci_inquiry_req ir
;
583 struct hci_dev
*hdev
;
584 int err
= 0, do_inquiry
= 0, max_rsp
;
588 if (copy_from_user(&ir
, ptr
, sizeof(ir
)))
591 hdev
= hci_dev_get(ir
.dev_id
);
596 if (inquiry_cache_age(hdev
) > INQUIRY_CACHE_AGE_MAX
||
597 inquiry_cache_empty(hdev
) ||
598 ir
.flags
& IREQ_CACHE_FLUSH
) {
599 inquiry_cache_flush(hdev
);
602 hci_dev_unlock(hdev
);
604 timeo
= ir
.length
* msecs_to_jiffies(2000);
607 err
= hci_request(hdev
, hci_inq_req
, (unsigned long)&ir
, timeo
);
612 /* for unlimited number of responses we will use buffer with 255 entries */
613 max_rsp
= (ir
.num_rsp
== 0) ? 255 : ir
.num_rsp
;
615 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
616 * copy it to the user space.
618 buf
= kmalloc(sizeof(struct inquiry_info
) * max_rsp
, GFP_KERNEL
);
625 ir
.num_rsp
= inquiry_cache_dump(hdev
, max_rsp
, buf
);
626 hci_dev_unlock(hdev
);
628 BT_DBG("num_rsp %d", ir
.num_rsp
);
630 if (!copy_to_user(ptr
, &ir
, sizeof(ir
))) {
632 if (copy_to_user(ptr
, buf
, sizeof(struct inquiry_info
) *
645 /* ---- HCI ioctl helpers ---- */
647 int hci_dev_open(__u16 dev
)
649 struct hci_dev
*hdev
;
652 hdev
= hci_dev_get(dev
);
656 BT_DBG("%s %p", hdev
->name
, hdev
);
660 if (hdev
->rfkill
&& rfkill_blocked(hdev
->rfkill
)) {
665 if (test_bit(HCI_UP
, &hdev
->flags
)) {
670 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &hdev
->quirks
))
671 set_bit(HCI_RAW
, &hdev
->flags
);
673 /* Treat all non BR/EDR controllers as raw devices if
674 enable_hs is not set */
675 if (hdev
->dev_type
!= HCI_BREDR
&& !enable_hs
)
676 set_bit(HCI_RAW
, &hdev
->flags
);
678 if (hdev
->open(hdev
)) {
683 if (!test_bit(HCI_RAW
, &hdev
->flags
)) {
684 atomic_set(&hdev
->cmd_cnt
, 1);
685 set_bit(HCI_INIT
, &hdev
->flags
);
686 hdev
->init_last_cmd
= 0;
688 ret
= __hci_request(hdev
, hci_init_req
, 0,
689 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
691 if (lmp_host_le_capable(hdev
))
692 ret
= __hci_request(hdev
, hci_le_init_req
, 0,
693 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
695 clear_bit(HCI_INIT
, &hdev
->flags
);
700 set_bit(HCI_UP
, &hdev
->flags
);
701 hci_notify(hdev
, HCI_DEV_UP
);
702 if (!test_bit(HCI_SETUP
, &hdev
->dev_flags
)) {
704 mgmt_powered(hdev
, 1);
705 hci_dev_unlock(hdev
);
708 /* Init failed, cleanup */
709 flush_work(&hdev
->tx_work
);
710 flush_work(&hdev
->cmd_work
);
711 flush_work(&hdev
->rx_work
);
713 skb_queue_purge(&hdev
->cmd_q
);
714 skb_queue_purge(&hdev
->rx_q
);
719 if (hdev
->sent_cmd
) {
720 kfree_skb(hdev
->sent_cmd
);
721 hdev
->sent_cmd
= NULL
;
729 hci_req_unlock(hdev
);
734 static int hci_dev_do_close(struct hci_dev
*hdev
)
736 BT_DBG("%s %p", hdev
->name
, hdev
);
738 hci_req_cancel(hdev
, ENODEV
);
741 if (!test_and_clear_bit(HCI_UP
, &hdev
->flags
)) {
742 del_timer_sync(&hdev
->cmd_timer
);
743 hci_req_unlock(hdev
);
747 /* Flush RX and TX works */
748 flush_work(&hdev
->tx_work
);
749 flush_work(&hdev
->rx_work
);
751 if (hdev
->discov_timeout
> 0) {
752 cancel_delayed_work(&hdev
->discov_off
);
753 hdev
->discov_timeout
= 0;
756 if (test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
))
757 cancel_delayed_work(&hdev
->power_off
);
759 if (test_and_clear_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
))
760 cancel_delayed_work(&hdev
->service_cache
);
763 inquiry_cache_flush(hdev
);
764 hci_conn_hash_flush(hdev
);
765 hci_dev_unlock(hdev
);
767 hci_notify(hdev
, HCI_DEV_DOWN
);
773 skb_queue_purge(&hdev
->cmd_q
);
774 atomic_set(&hdev
->cmd_cnt
, 1);
775 if (!test_bit(HCI_RAW
, &hdev
->flags
) &&
776 test_bit(HCI_QUIRK_NO_RESET
, &hdev
->quirks
)) {
777 set_bit(HCI_INIT
, &hdev
->flags
);
778 __hci_request(hdev
, hci_reset_req
, 0,
779 msecs_to_jiffies(250));
780 clear_bit(HCI_INIT
, &hdev
->flags
);
784 flush_work(&hdev
->cmd_work
);
787 skb_queue_purge(&hdev
->rx_q
);
788 skb_queue_purge(&hdev
->cmd_q
);
789 skb_queue_purge(&hdev
->raw_q
);
791 /* Drop last sent command */
792 if (hdev
->sent_cmd
) {
793 del_timer_sync(&hdev
->cmd_timer
);
794 kfree_skb(hdev
->sent_cmd
);
795 hdev
->sent_cmd
= NULL
;
798 /* After this point our queues are empty
799 * and no tasks are scheduled. */
803 mgmt_powered(hdev
, 0);
804 hci_dev_unlock(hdev
);
809 hci_req_unlock(hdev
);
815 int hci_dev_close(__u16 dev
)
817 struct hci_dev
*hdev
;
820 hdev
= hci_dev_get(dev
);
823 err
= hci_dev_do_close(hdev
);
828 int hci_dev_reset(__u16 dev
)
830 struct hci_dev
*hdev
;
833 hdev
= hci_dev_get(dev
);
839 if (!test_bit(HCI_UP
, &hdev
->flags
))
843 skb_queue_purge(&hdev
->rx_q
);
844 skb_queue_purge(&hdev
->cmd_q
);
847 inquiry_cache_flush(hdev
);
848 hci_conn_hash_flush(hdev
);
849 hci_dev_unlock(hdev
);
854 atomic_set(&hdev
->cmd_cnt
, 1);
855 hdev
->acl_cnt
= 0; hdev
->sco_cnt
= 0; hdev
->le_cnt
= 0;
857 if (!test_bit(HCI_RAW
, &hdev
->flags
))
858 ret
= __hci_request(hdev
, hci_reset_req
, 0,
859 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
862 hci_req_unlock(hdev
);
867 int hci_dev_reset_stat(__u16 dev
)
869 struct hci_dev
*hdev
;
872 hdev
= hci_dev_get(dev
);
876 memset(&hdev
->stat
, 0, sizeof(struct hci_dev_stats
));
883 int hci_dev_cmd(unsigned int cmd
, void __user
*arg
)
885 struct hci_dev
*hdev
;
886 struct hci_dev_req dr
;
889 if (copy_from_user(&dr
, arg
, sizeof(dr
)))
892 hdev
= hci_dev_get(dr
.dev_id
);
898 err
= hci_request(hdev
, hci_auth_req
, dr
.dev_opt
,
899 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
903 if (!lmp_encrypt_capable(hdev
)) {
908 if (!test_bit(HCI_AUTH
, &hdev
->flags
)) {
909 /* Auth must be enabled first */
910 err
= hci_request(hdev
, hci_auth_req
, dr
.dev_opt
,
911 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
916 err
= hci_request(hdev
, hci_encrypt_req
, dr
.dev_opt
,
917 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
921 err
= hci_request(hdev
, hci_scan_req
, dr
.dev_opt
,
922 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
926 err
= hci_request(hdev
, hci_linkpol_req
, dr
.dev_opt
,
927 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
931 hdev
->link_mode
= ((__u16
) dr
.dev_opt
) &
932 (HCI_LM_MASTER
| HCI_LM_ACCEPT
);
936 hdev
->pkt_type
= (__u16
) dr
.dev_opt
;
940 hdev
->acl_mtu
= *((__u16
*) &dr
.dev_opt
+ 1);
941 hdev
->acl_pkts
= *((__u16
*) &dr
.dev_opt
+ 0);
945 hdev
->sco_mtu
= *((__u16
*) &dr
.dev_opt
+ 1);
946 hdev
->sco_pkts
= *((__u16
*) &dr
.dev_opt
+ 0);
958 int hci_get_dev_list(void __user
*arg
)
960 struct hci_dev
*hdev
;
961 struct hci_dev_list_req
*dl
;
962 struct hci_dev_req
*dr
;
963 int n
= 0, size
, err
;
966 if (get_user(dev_num
, (__u16 __user
*) arg
))
969 if (!dev_num
|| dev_num
> (PAGE_SIZE
* 2) / sizeof(*dr
))
972 size
= sizeof(*dl
) + dev_num
* sizeof(*dr
);
974 dl
= kzalloc(size
, GFP_KERNEL
);
980 read_lock(&hci_dev_list_lock
);
981 list_for_each_entry(hdev
, &hci_dev_list
, list
) {
982 if (test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
))
983 cancel_delayed_work(&hdev
->power_off
);
985 if (!test_bit(HCI_MGMT
, &hdev
->dev_flags
))
986 set_bit(HCI_PAIRABLE
, &hdev
->dev_flags
);
988 (dr
+ n
)->dev_id
= hdev
->id
;
989 (dr
+ n
)->dev_opt
= hdev
->flags
;
994 read_unlock(&hci_dev_list_lock
);
997 size
= sizeof(*dl
) + n
* sizeof(*dr
);
999 err
= copy_to_user(arg
, dl
, size
);
1002 return err
? -EFAULT
: 0;
1005 int hci_get_dev_info(void __user
*arg
)
1007 struct hci_dev
*hdev
;
1008 struct hci_dev_info di
;
1011 if (copy_from_user(&di
, arg
, sizeof(di
)))
1014 hdev
= hci_dev_get(di
.dev_id
);
1018 if (test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
))
1019 cancel_delayed_work_sync(&hdev
->power_off
);
1021 if (!test_bit(HCI_MGMT
, &hdev
->dev_flags
))
1022 set_bit(HCI_PAIRABLE
, &hdev
->dev_flags
);
1024 strcpy(di
.name
, hdev
->name
);
1025 di
.bdaddr
= hdev
->bdaddr
;
1026 di
.type
= (hdev
->bus
& 0x0f) | (hdev
->dev_type
<< 4);
1027 di
.flags
= hdev
->flags
;
1028 di
.pkt_type
= hdev
->pkt_type
;
1029 di
.acl_mtu
= hdev
->acl_mtu
;
1030 di
.acl_pkts
= hdev
->acl_pkts
;
1031 di
.sco_mtu
= hdev
->sco_mtu
;
1032 di
.sco_pkts
= hdev
->sco_pkts
;
1033 di
.link_policy
= hdev
->link_policy
;
1034 di
.link_mode
= hdev
->link_mode
;
1036 memcpy(&di
.stat
, &hdev
->stat
, sizeof(di
.stat
));
1037 memcpy(&di
.features
, &hdev
->features
, sizeof(di
.features
));
1039 if (copy_to_user(arg
, &di
, sizeof(di
)))
1047 /* ---- Interface to HCI drivers ---- */
1049 static int hci_rfkill_set_block(void *data
, bool blocked
)
1051 struct hci_dev
*hdev
= data
;
1053 BT_DBG("%p name %s blocked %d", hdev
, hdev
->name
, blocked
);
1058 hci_dev_do_close(hdev
);
1063 static const struct rfkill_ops hci_rfkill_ops
= {
1064 .set_block
= hci_rfkill_set_block
,
1067 /* Alloc HCI device */
1068 struct hci_dev
*hci_alloc_dev(void)
1070 struct hci_dev
*hdev
;
1072 hdev
= kzalloc(sizeof(struct hci_dev
), GFP_KERNEL
);
1076 hci_init_sysfs(hdev
);
1077 skb_queue_head_init(&hdev
->driver_init
);
1081 EXPORT_SYMBOL(hci_alloc_dev
);
1083 /* Free HCI device */
1084 void hci_free_dev(struct hci_dev
*hdev
)
1086 skb_queue_purge(&hdev
->driver_init
);
1088 /* will free via device release */
1089 put_device(&hdev
->dev
);
1091 EXPORT_SYMBOL(hci_free_dev
);
1093 static void hci_power_on(struct work_struct
*work
)
1095 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, power_on
);
1097 BT_DBG("%s", hdev
->name
);
1099 if (hci_dev_open(hdev
->id
) < 0)
1102 if (test_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
))
1103 schedule_delayed_work(&hdev
->power_off
,
1104 msecs_to_jiffies(AUTO_OFF_TIMEOUT
));
1106 if (test_and_clear_bit(HCI_SETUP
, &hdev
->dev_flags
))
1107 mgmt_index_added(hdev
);
1110 static void hci_power_off(struct work_struct
*work
)
1112 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
1115 BT_DBG("%s", hdev
->name
);
1117 clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
);
1119 hci_dev_close(hdev
->id
);
1122 static void hci_discov_off(struct work_struct
*work
)
1124 struct hci_dev
*hdev
;
1125 u8 scan
= SCAN_PAGE
;
1127 hdev
= container_of(work
, struct hci_dev
, discov_off
.work
);
1129 BT_DBG("%s", hdev
->name
);
1133 hci_send_cmd(hdev
, HCI_OP_WRITE_SCAN_ENABLE
, sizeof(scan
), &scan
);
1135 hdev
->discov_timeout
= 0;
1137 hci_dev_unlock(hdev
);
1140 int hci_uuids_clear(struct hci_dev
*hdev
)
1142 struct list_head
*p
, *n
;
1144 list_for_each_safe(p
, n
, &hdev
->uuids
) {
1145 struct bt_uuid
*uuid
;
1147 uuid
= list_entry(p
, struct bt_uuid
, list
);
1156 int hci_link_keys_clear(struct hci_dev
*hdev
)
1158 struct list_head
*p
, *n
;
1160 list_for_each_safe(p
, n
, &hdev
->link_keys
) {
1161 struct link_key
*key
;
1163 key
= list_entry(p
, struct link_key
, list
);
1172 int hci_smp_ltks_clear(struct hci_dev
*hdev
)
1174 struct smp_ltk
*k
, *tmp
;
1176 list_for_each_entry_safe(k
, tmp
, &hdev
->long_term_keys
, list
) {
1184 struct link_key
*hci_find_link_key(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1188 list_for_each_entry(k
, &hdev
->link_keys
, list
)
1189 if (bacmp(bdaddr
, &k
->bdaddr
) == 0)
1195 static int hci_persistent_key(struct hci_dev
*hdev
, struct hci_conn
*conn
,
1196 u8 key_type
, u8 old_key_type
)
1199 if (key_type
< 0x03)
1202 /* Debug keys are insecure so don't store them persistently */
1203 if (key_type
== HCI_LK_DEBUG_COMBINATION
)
1206 /* Changed combination key and there's no previous one */
1207 if (key_type
== HCI_LK_CHANGED_COMBINATION
&& old_key_type
== 0xff)
1210 /* Security mode 3 case */
1214 /* Neither local nor remote side had no-bonding as requirement */
1215 if (conn
->auth_type
> 0x01 && conn
->remote_auth
> 0x01)
1218 /* Local side had dedicated bonding as requirement */
1219 if (conn
->auth_type
== 0x02 || conn
->auth_type
== 0x03)
1222 /* Remote side had dedicated bonding as requirement */
1223 if (conn
->remote_auth
== 0x02 || conn
->remote_auth
== 0x03)
1226 /* If none of the above criteria match, then don't store the key
1231 struct smp_ltk
*hci_find_ltk(struct hci_dev
*hdev
, __le16 ediv
, u8 rand
[8])
1235 list_for_each_entry(k
, &hdev
->long_term_keys
, list
) {
1236 if (k
->ediv
!= ediv
||
1237 memcmp(rand
, k
->rand
, sizeof(k
->rand
)))
1245 EXPORT_SYMBOL(hci_find_ltk
);
1247 struct smp_ltk
*hci_find_ltk_by_addr(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
1252 list_for_each_entry(k
, &hdev
->long_term_keys
, list
)
1253 if (addr_type
== k
->bdaddr_type
&&
1254 bacmp(bdaddr
, &k
->bdaddr
) == 0)
1259 EXPORT_SYMBOL(hci_find_ltk_by_addr
);
1261 int hci_add_link_key(struct hci_dev
*hdev
, struct hci_conn
*conn
, int new_key
,
1262 bdaddr_t
*bdaddr
, u8
*val
, u8 type
, u8 pin_len
)
1264 struct link_key
*key
, *old_key
;
1265 u8 old_key_type
, persistent
;
1267 old_key
= hci_find_link_key(hdev
, bdaddr
);
1269 old_key_type
= old_key
->type
;
1272 old_key_type
= conn
? conn
->key_type
: 0xff;
1273 key
= kzalloc(sizeof(*key
), GFP_ATOMIC
);
1276 list_add(&key
->list
, &hdev
->link_keys
);
1279 BT_DBG("%s key for %s type %u", hdev
->name
, batostr(bdaddr
), type
);
1281 /* Some buggy controller combinations generate a changed
1282 * combination key for legacy pairing even when there's no
1284 if (type
== HCI_LK_CHANGED_COMBINATION
&&
1285 (!conn
|| conn
->remote_auth
== 0xff) &&
1286 old_key_type
== 0xff) {
1287 type
= HCI_LK_COMBINATION
;
1289 conn
->key_type
= type
;
1292 bacpy(&key
->bdaddr
, bdaddr
);
1293 memcpy(key
->val
, val
, 16);
1294 key
->pin_len
= pin_len
;
1296 if (type
== HCI_LK_CHANGED_COMBINATION
)
1297 key
->type
= old_key_type
;
1304 persistent
= hci_persistent_key(hdev
, conn
, type
, old_key_type
);
1306 mgmt_new_link_key(hdev
, key
, persistent
);
1309 list_del(&key
->list
);
1316 int hci_add_ltk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 addr_type
, u8 type
,
1317 int new_key
, u8 authenticated
, u8 tk
[16],
1318 u8 enc_size
, u16 ediv
, u8 rand
[8])
1320 struct smp_ltk
*key
, *old_key
;
1322 if (!(type
& HCI_SMP_STK
) && !(type
& HCI_SMP_LTK
))
1325 old_key
= hci_find_ltk_by_addr(hdev
, bdaddr
, addr_type
);
1329 key
= kzalloc(sizeof(*key
), GFP_ATOMIC
);
1332 list_add(&key
->list
, &hdev
->long_term_keys
);
1335 bacpy(&key
->bdaddr
, bdaddr
);
1336 key
->bdaddr_type
= addr_type
;
1337 memcpy(key
->val
, tk
, sizeof(key
->val
));
1338 key
->authenticated
= authenticated
;
1340 key
->enc_size
= enc_size
;
1342 memcpy(key
->rand
, rand
, sizeof(key
->rand
));
1347 if (type
& HCI_SMP_LTK
)
1348 mgmt_new_ltk(hdev
, key
, 1);
1353 int hci_remove_link_key(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1355 struct link_key
*key
;
1357 key
= hci_find_link_key(hdev
, bdaddr
);
1361 BT_DBG("%s removing %s", hdev
->name
, batostr(bdaddr
));
1363 list_del(&key
->list
);
1369 int hci_remove_ltk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1371 struct smp_ltk
*k
, *tmp
;
1373 list_for_each_entry_safe(k
, tmp
, &hdev
->long_term_keys
, list
) {
1374 if (bacmp(bdaddr
, &k
->bdaddr
))
1377 BT_DBG("%s removing %s", hdev
->name
, batostr(bdaddr
));
1386 /* HCI command timer function */
1387 static void hci_cmd_timer(unsigned long arg
)
1389 struct hci_dev
*hdev
= (void *) arg
;
1391 BT_ERR("%s command tx timeout", hdev
->name
);
1392 atomic_set(&hdev
->cmd_cnt
, 1);
1393 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
1396 struct oob_data
*hci_find_remote_oob_data(struct hci_dev
*hdev
,
1399 struct oob_data
*data
;
1401 list_for_each_entry(data
, &hdev
->remote_oob_data
, list
)
1402 if (bacmp(bdaddr
, &data
->bdaddr
) == 0)
1408 int hci_remove_remote_oob_data(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1410 struct oob_data
*data
;
1412 data
= hci_find_remote_oob_data(hdev
, bdaddr
);
1416 BT_DBG("%s removing %s", hdev
->name
, batostr(bdaddr
));
1418 list_del(&data
->list
);
1424 int hci_remote_oob_data_clear(struct hci_dev
*hdev
)
1426 struct oob_data
*data
, *n
;
1428 list_for_each_entry_safe(data
, n
, &hdev
->remote_oob_data
, list
) {
1429 list_del(&data
->list
);
1436 int hci_add_remote_oob_data(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8
*hash
,
1439 struct oob_data
*data
;
1441 data
= hci_find_remote_oob_data(hdev
, bdaddr
);
1444 data
= kmalloc(sizeof(*data
), GFP_ATOMIC
);
1448 bacpy(&data
->bdaddr
, bdaddr
);
1449 list_add(&data
->list
, &hdev
->remote_oob_data
);
1452 memcpy(data
->hash
, hash
, sizeof(data
->hash
));
1453 memcpy(data
->randomizer
, randomizer
, sizeof(data
->randomizer
));
1455 BT_DBG("%s for %s", hdev
->name
, batostr(bdaddr
));
1460 struct bdaddr_list
*hci_blacklist_lookup(struct hci_dev
*hdev
,
1463 struct bdaddr_list
*b
;
1465 list_for_each_entry(b
, &hdev
->blacklist
, list
)
1466 if (bacmp(bdaddr
, &b
->bdaddr
) == 0)
1472 int hci_blacklist_clear(struct hci_dev
*hdev
)
1474 struct list_head
*p
, *n
;
1476 list_for_each_safe(p
, n
, &hdev
->blacklist
) {
1477 struct bdaddr_list
*b
;
1479 b
= list_entry(p
, struct bdaddr_list
, list
);
1488 int hci_blacklist_add(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1490 struct bdaddr_list
*entry
;
1492 if (bacmp(bdaddr
, BDADDR_ANY
) == 0)
1495 if (hci_blacklist_lookup(hdev
, bdaddr
))
1498 entry
= kzalloc(sizeof(struct bdaddr_list
), GFP_KERNEL
);
1502 bacpy(&entry
->bdaddr
, bdaddr
);
1504 list_add(&entry
->list
, &hdev
->blacklist
);
1506 return mgmt_device_blocked(hdev
, bdaddr
);
1509 int hci_blacklist_del(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1511 struct bdaddr_list
*entry
;
1513 if (bacmp(bdaddr
, BDADDR_ANY
) == 0)
1514 return hci_blacklist_clear(hdev
);
1516 entry
= hci_blacklist_lookup(hdev
, bdaddr
);
1520 list_del(&entry
->list
);
1523 return mgmt_device_unblocked(hdev
, bdaddr
);
1526 static void hci_clear_adv_cache(struct work_struct
*work
)
1528 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
1533 hci_adv_entries_clear(hdev
);
1535 hci_dev_unlock(hdev
);
1538 int hci_adv_entries_clear(struct hci_dev
*hdev
)
1540 struct adv_entry
*entry
, *tmp
;
1542 list_for_each_entry_safe(entry
, tmp
, &hdev
->adv_entries
, list
) {
1543 list_del(&entry
->list
);
1547 BT_DBG("%s adv cache cleared", hdev
->name
);
1552 struct adv_entry
*hci_find_adv_entry(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1554 struct adv_entry
*entry
;
1556 list_for_each_entry(entry
, &hdev
->adv_entries
, list
)
1557 if (bacmp(bdaddr
, &entry
->bdaddr
) == 0)
1563 static inline int is_connectable_adv(u8 evt_type
)
1565 if (evt_type
== ADV_IND
|| evt_type
== ADV_DIRECT_IND
)
1571 int hci_add_adv_entry(struct hci_dev
*hdev
,
1572 struct hci_ev_le_advertising_info
*ev
)
1574 struct adv_entry
*entry
;
1576 if (!is_connectable_adv(ev
->evt_type
))
1579 /* Only new entries should be added to adv_entries. So, if
1580 * bdaddr was found, don't add it. */
1581 if (hci_find_adv_entry(hdev
, &ev
->bdaddr
))
1584 entry
= kzalloc(sizeof(*entry
), GFP_KERNEL
);
1588 bacpy(&entry
->bdaddr
, &ev
->bdaddr
);
1589 entry
->bdaddr_type
= ev
->bdaddr_type
;
1591 list_add(&entry
->list
, &hdev
->adv_entries
);
1593 BT_DBG("%s adv entry added: address %s type %u", hdev
->name
,
1594 batostr(&entry
->bdaddr
), entry
->bdaddr_type
);
1599 /* Register HCI device */
1600 int hci_register_dev(struct hci_dev
*hdev
)
1602 struct list_head
*head
= &hci_dev_list
, *p
;
1605 BT_DBG("%p name %s bus %d", hdev
, hdev
->name
, hdev
->bus
);
1607 if (!hdev
->open
|| !hdev
->close
)
1610 /* Do not allow HCI_AMP devices to register at index 0,
1611 * so the index can be used as the AMP controller ID.
1613 id
= (hdev
->dev_type
== HCI_BREDR
) ? 0 : 1;
1615 write_lock(&hci_dev_list_lock
);
1617 /* Find first available device id */
1618 list_for_each(p
, &hci_dev_list
) {
1619 if (list_entry(p
, struct hci_dev
, list
)->id
!= id
)
1624 sprintf(hdev
->name
, "hci%d", id
);
1626 list_add_tail(&hdev
->list
, head
);
1628 mutex_init(&hdev
->lock
);
1631 hdev
->dev_flags
= 0;
1632 hdev
->pkt_type
= (HCI_DM1
| HCI_DH1
| HCI_HV1
);
1633 hdev
->esco_type
= (ESCO_HV1
);
1634 hdev
->link_mode
= (HCI_LM_ACCEPT
);
1635 hdev
->io_capability
= 0x03; /* No Input No Output */
1637 hdev
->idle_timeout
= 0;
1638 hdev
->sniff_max_interval
= 800;
1639 hdev
->sniff_min_interval
= 80;
1641 INIT_WORK(&hdev
->rx_work
, hci_rx_work
);
1642 INIT_WORK(&hdev
->cmd_work
, hci_cmd_work
);
1643 INIT_WORK(&hdev
->tx_work
, hci_tx_work
);
1646 skb_queue_head_init(&hdev
->rx_q
);
1647 skb_queue_head_init(&hdev
->cmd_q
);
1648 skb_queue_head_init(&hdev
->raw_q
);
1650 setup_timer(&hdev
->cmd_timer
, hci_cmd_timer
, (unsigned long) hdev
);
1652 for (i
= 0; i
< NUM_REASSEMBLY
; i
++)
1653 hdev
->reassembly
[i
] = NULL
;
1655 init_waitqueue_head(&hdev
->req_wait_q
);
1656 mutex_init(&hdev
->req_lock
);
1658 discovery_init(hdev
);
1660 hci_conn_hash_init(hdev
);
1662 INIT_LIST_HEAD(&hdev
->mgmt_pending
);
1664 INIT_LIST_HEAD(&hdev
->blacklist
);
1666 INIT_LIST_HEAD(&hdev
->uuids
);
1668 INIT_LIST_HEAD(&hdev
->link_keys
);
1669 INIT_LIST_HEAD(&hdev
->long_term_keys
);
1671 INIT_LIST_HEAD(&hdev
->remote_oob_data
);
1673 INIT_LIST_HEAD(&hdev
->adv_entries
);
1675 INIT_DELAYED_WORK(&hdev
->adv_work
, hci_clear_adv_cache
);
1676 INIT_WORK(&hdev
->power_on
, hci_power_on
);
1677 INIT_DELAYED_WORK(&hdev
->power_off
, hci_power_off
);
1679 INIT_DELAYED_WORK(&hdev
->discov_off
, hci_discov_off
);
1681 memset(&hdev
->stat
, 0, sizeof(struct hci_dev_stats
));
1683 atomic_set(&hdev
->promisc
, 0);
1685 write_unlock(&hci_dev_list_lock
);
1687 hdev
->workqueue
= alloc_workqueue(hdev
->name
, WQ_HIGHPRI
| WQ_UNBOUND
|
1689 if (!hdev
->workqueue
) {
1694 error
= hci_add_sysfs(hdev
);
1698 hdev
->rfkill
= rfkill_alloc(hdev
->name
, &hdev
->dev
,
1699 RFKILL_TYPE_BLUETOOTH
, &hci_rfkill_ops
, hdev
);
1701 if (rfkill_register(hdev
->rfkill
) < 0) {
1702 rfkill_destroy(hdev
->rfkill
);
1703 hdev
->rfkill
= NULL
;
1707 set_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
);
1708 set_bit(HCI_SETUP
, &hdev
->dev_flags
);
1709 schedule_work(&hdev
->power_on
);
1711 hci_notify(hdev
, HCI_DEV_REG
);
1717 destroy_workqueue(hdev
->workqueue
);
1719 write_lock(&hci_dev_list_lock
);
1720 list_del(&hdev
->list
);
1721 write_unlock(&hci_dev_list_lock
);
1725 EXPORT_SYMBOL(hci_register_dev
);
1727 /* Unregister HCI device */
1728 void hci_unregister_dev(struct hci_dev
*hdev
)
1732 BT_DBG("%p name %s bus %d", hdev
, hdev
->name
, hdev
->bus
);
1734 write_lock(&hci_dev_list_lock
);
1735 list_del(&hdev
->list
);
1736 write_unlock(&hci_dev_list_lock
);
1738 hci_dev_do_close(hdev
);
1740 for (i
= 0; i
< NUM_REASSEMBLY
; i
++)
1741 kfree_skb(hdev
->reassembly
[i
]);
1743 if (!test_bit(HCI_INIT
, &hdev
->flags
) &&
1744 !test_bit(HCI_SETUP
, &hdev
->dev_flags
)) {
1746 mgmt_index_removed(hdev
);
1747 hci_dev_unlock(hdev
);
1750 /* mgmt_index_removed should take care of emptying the
1752 BUG_ON(!list_empty(&hdev
->mgmt_pending
));
1754 hci_notify(hdev
, HCI_DEV_UNREG
);
1757 rfkill_unregister(hdev
->rfkill
);
1758 rfkill_destroy(hdev
->rfkill
);
1761 hci_del_sysfs(hdev
);
1763 cancel_delayed_work_sync(&hdev
->adv_work
);
1765 destroy_workqueue(hdev
->workqueue
);
1768 hci_blacklist_clear(hdev
);
1769 hci_uuids_clear(hdev
);
1770 hci_link_keys_clear(hdev
);
1771 hci_smp_ltks_clear(hdev
);
1772 hci_remote_oob_data_clear(hdev
);
1773 hci_adv_entries_clear(hdev
);
1774 hci_dev_unlock(hdev
);
1778 EXPORT_SYMBOL(hci_unregister_dev
);
1780 /* Suspend HCI device */
1781 int hci_suspend_dev(struct hci_dev
*hdev
)
1783 hci_notify(hdev
, HCI_DEV_SUSPEND
);
1786 EXPORT_SYMBOL(hci_suspend_dev
);
1788 /* Resume HCI device */
1789 int hci_resume_dev(struct hci_dev
*hdev
)
1791 hci_notify(hdev
, HCI_DEV_RESUME
);
1794 EXPORT_SYMBOL(hci_resume_dev
);
1796 /* Receive frame from HCI drivers */
1797 int hci_recv_frame(struct sk_buff
*skb
)
1799 struct hci_dev
*hdev
= (struct hci_dev
*) skb
->dev
;
1800 if (!hdev
|| (!test_bit(HCI_UP
, &hdev
->flags
)
1801 && !test_bit(HCI_INIT
, &hdev
->flags
))) {
1807 bt_cb(skb
)->incoming
= 1;
1810 __net_timestamp(skb
);
1812 skb_queue_tail(&hdev
->rx_q
, skb
);
1813 queue_work(hdev
->workqueue
, &hdev
->rx_work
);
1817 EXPORT_SYMBOL(hci_recv_frame
);
1819 static int hci_reassembly(struct hci_dev
*hdev
, int type
, void *data
,
1820 int count
, __u8 index
)
1825 struct sk_buff
*skb
;
1826 struct bt_skb_cb
*scb
;
1828 if ((type
< HCI_ACLDATA_PKT
|| type
> HCI_EVENT_PKT
) ||
1829 index
>= NUM_REASSEMBLY
)
1832 skb
= hdev
->reassembly
[index
];
1836 case HCI_ACLDATA_PKT
:
1837 len
= HCI_MAX_FRAME_SIZE
;
1838 hlen
= HCI_ACL_HDR_SIZE
;
1841 len
= HCI_MAX_EVENT_SIZE
;
1842 hlen
= HCI_EVENT_HDR_SIZE
;
1844 case HCI_SCODATA_PKT
:
1845 len
= HCI_MAX_SCO_SIZE
;
1846 hlen
= HCI_SCO_HDR_SIZE
;
1850 skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
1854 scb
= (void *) skb
->cb
;
1856 scb
->pkt_type
= type
;
1858 skb
->dev
= (void *) hdev
;
1859 hdev
->reassembly
[index
] = skb
;
1863 scb
= (void *) skb
->cb
;
1864 len
= min(scb
->expect
, (__u16
)count
);
1866 memcpy(skb_put(skb
, len
), data
, len
);
1875 if (skb
->len
== HCI_EVENT_HDR_SIZE
) {
1876 struct hci_event_hdr
*h
= hci_event_hdr(skb
);
1877 scb
->expect
= h
->plen
;
1879 if (skb_tailroom(skb
) < scb
->expect
) {
1881 hdev
->reassembly
[index
] = NULL
;
1887 case HCI_ACLDATA_PKT
:
1888 if (skb
->len
== HCI_ACL_HDR_SIZE
) {
1889 struct hci_acl_hdr
*h
= hci_acl_hdr(skb
);
1890 scb
->expect
= __le16_to_cpu(h
->dlen
);
1892 if (skb_tailroom(skb
) < scb
->expect
) {
1894 hdev
->reassembly
[index
] = NULL
;
1900 case HCI_SCODATA_PKT
:
1901 if (skb
->len
== HCI_SCO_HDR_SIZE
) {
1902 struct hci_sco_hdr
*h
= hci_sco_hdr(skb
);
1903 scb
->expect
= h
->dlen
;
1905 if (skb_tailroom(skb
) < scb
->expect
) {
1907 hdev
->reassembly
[index
] = NULL
;
1914 if (scb
->expect
== 0) {
1915 /* Complete frame */
1917 bt_cb(skb
)->pkt_type
= type
;
1918 hci_recv_frame(skb
);
1920 hdev
->reassembly
[index
] = NULL
;
1928 int hci_recv_fragment(struct hci_dev
*hdev
, int type
, void *data
, int count
)
1932 if (type
< HCI_ACLDATA_PKT
|| type
> HCI_EVENT_PKT
)
1936 rem
= hci_reassembly(hdev
, type
, data
, count
, type
- 1);
1940 data
+= (count
- rem
);
1946 EXPORT_SYMBOL(hci_recv_fragment
);
1948 #define STREAM_REASSEMBLY 0
1950 int hci_recv_stream_fragment(struct hci_dev
*hdev
, void *data
, int count
)
1956 struct sk_buff
*skb
= hdev
->reassembly
[STREAM_REASSEMBLY
];
1959 struct { char type
; } *pkt
;
1961 /* Start of the frame */
1968 type
= bt_cb(skb
)->pkt_type
;
1970 rem
= hci_reassembly(hdev
, type
, data
, count
,
1975 data
+= (count
- rem
);
1981 EXPORT_SYMBOL(hci_recv_stream_fragment
);
1983 /* ---- Interface to upper protocols ---- */
1985 int hci_register_cb(struct hci_cb
*cb
)
1987 BT_DBG("%p name %s", cb
, cb
->name
);
1989 write_lock(&hci_cb_list_lock
);
1990 list_add(&cb
->list
, &hci_cb_list
);
1991 write_unlock(&hci_cb_list_lock
);
1995 EXPORT_SYMBOL(hci_register_cb
);
1997 int hci_unregister_cb(struct hci_cb
*cb
)
1999 BT_DBG("%p name %s", cb
, cb
->name
);
2001 write_lock(&hci_cb_list_lock
);
2002 list_del(&cb
->list
);
2003 write_unlock(&hci_cb_list_lock
);
2007 EXPORT_SYMBOL(hci_unregister_cb
);
2009 static int hci_send_frame(struct sk_buff
*skb
)
2011 struct hci_dev
*hdev
= (struct hci_dev
*) skb
->dev
;
2018 BT_DBG("%s type %d len %d", hdev
->name
, bt_cb(skb
)->pkt_type
, skb
->len
);
2020 if (atomic_read(&hdev
->promisc
)) {
2022 __net_timestamp(skb
);
2024 hci_send_to_sock(hdev
, skb
, NULL
);
2027 /* Get rid of skb owner, prior to sending to the driver. */
2030 return hdev
->send(skb
);
2033 /* Send HCI command */
2034 int hci_send_cmd(struct hci_dev
*hdev
, __u16 opcode
, __u32 plen
, void *param
)
2036 int len
= HCI_COMMAND_HDR_SIZE
+ plen
;
2037 struct hci_command_hdr
*hdr
;
2038 struct sk_buff
*skb
;
2040 BT_DBG("%s opcode 0x%x plen %d", hdev
->name
, opcode
, plen
);
2042 skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
2044 BT_ERR("%s no memory for command", hdev
->name
);
2048 hdr
= (struct hci_command_hdr
*) skb_put(skb
, HCI_COMMAND_HDR_SIZE
);
2049 hdr
->opcode
= cpu_to_le16(opcode
);
2053 memcpy(skb_put(skb
, plen
), param
, plen
);
2055 BT_DBG("skb len %d", skb
->len
);
2057 bt_cb(skb
)->pkt_type
= HCI_COMMAND_PKT
;
2058 skb
->dev
= (void *) hdev
;
2060 if (test_bit(HCI_INIT
, &hdev
->flags
))
2061 hdev
->init_last_cmd
= opcode
;
2063 skb_queue_tail(&hdev
->cmd_q
, skb
);
2064 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
2069 /* Get data from the previously sent command */
2070 void *hci_sent_cmd_data(struct hci_dev
*hdev
, __u16 opcode
)
2072 struct hci_command_hdr
*hdr
;
2074 if (!hdev
->sent_cmd
)
2077 hdr
= (void *) hdev
->sent_cmd
->data
;
2079 if (hdr
->opcode
!= cpu_to_le16(opcode
))
2082 BT_DBG("%s opcode 0x%x", hdev
->name
, opcode
);
2084 return hdev
->sent_cmd
->data
+ HCI_COMMAND_HDR_SIZE
;
2088 static void hci_add_acl_hdr(struct sk_buff
*skb
, __u16 handle
, __u16 flags
)
2090 struct hci_acl_hdr
*hdr
;
2093 skb_push(skb
, HCI_ACL_HDR_SIZE
);
2094 skb_reset_transport_header(skb
);
2095 hdr
= (struct hci_acl_hdr
*)skb_transport_header(skb
);
2096 hdr
->handle
= cpu_to_le16(hci_handle_pack(handle
, flags
));
2097 hdr
->dlen
= cpu_to_le16(len
);
2100 static void hci_queue_acl(struct hci_conn
*conn
, struct sk_buff_head
*queue
,
2101 struct sk_buff
*skb
, __u16 flags
)
2103 struct hci_dev
*hdev
= conn
->hdev
;
2104 struct sk_buff
*list
;
2106 list
= skb_shinfo(skb
)->frag_list
;
2108 /* Non fragmented */
2109 BT_DBG("%s nonfrag skb %p len %d", hdev
->name
, skb
, skb
->len
);
2111 skb_queue_tail(queue
, skb
);
2114 BT_DBG("%s frag %p len %d", hdev
->name
, skb
, skb
->len
);
2116 skb_shinfo(skb
)->frag_list
= NULL
;
2118 /* Queue all fragments atomically */
2119 spin_lock(&queue
->lock
);
2121 __skb_queue_tail(queue
, skb
);
2123 flags
&= ~ACL_START
;
2126 skb
= list
; list
= list
->next
;
2128 skb
->dev
= (void *) hdev
;
2129 bt_cb(skb
)->pkt_type
= HCI_ACLDATA_PKT
;
2130 hci_add_acl_hdr(skb
, conn
->handle
, flags
);
2132 BT_DBG("%s frag %p len %d", hdev
->name
, skb
, skb
->len
);
2134 __skb_queue_tail(queue
, skb
);
2137 spin_unlock(&queue
->lock
);
2141 void hci_send_acl(struct hci_chan
*chan
, struct sk_buff
*skb
, __u16 flags
)
2143 struct hci_conn
*conn
= chan
->conn
;
2144 struct hci_dev
*hdev
= conn
->hdev
;
2146 BT_DBG("%s chan %p flags 0x%x", hdev
->name
, chan
, flags
);
2148 skb
->dev
= (void *) hdev
;
2149 bt_cb(skb
)->pkt_type
= HCI_ACLDATA_PKT
;
2150 hci_add_acl_hdr(skb
, conn
->handle
, flags
);
2152 hci_queue_acl(conn
, &chan
->data_q
, skb
, flags
);
2154 queue_work(hdev
->workqueue
, &hdev
->tx_work
);
2156 EXPORT_SYMBOL(hci_send_acl
);
2159 void hci_send_sco(struct hci_conn
*conn
, struct sk_buff
*skb
)
2161 struct hci_dev
*hdev
= conn
->hdev
;
2162 struct hci_sco_hdr hdr
;
2164 BT_DBG("%s len %d", hdev
->name
, skb
->len
);
2166 hdr
.handle
= cpu_to_le16(conn
->handle
);
2167 hdr
.dlen
= skb
->len
;
2169 skb_push(skb
, HCI_SCO_HDR_SIZE
);
2170 skb_reset_transport_header(skb
);
2171 memcpy(skb_transport_header(skb
), &hdr
, HCI_SCO_HDR_SIZE
);
2173 skb
->dev
= (void *) hdev
;
2174 bt_cb(skb
)->pkt_type
= HCI_SCODATA_PKT
;
2176 skb_queue_tail(&conn
->data_q
, skb
);
2177 queue_work(hdev
->workqueue
, &hdev
->tx_work
);
2179 EXPORT_SYMBOL(hci_send_sco
);
2181 /* ---- HCI TX task (outgoing data) ---- */
2183 /* HCI Connection scheduler */
2184 static inline struct hci_conn
*hci_low_sent(struct hci_dev
*hdev
, __u8 type
, int *quote
)
2186 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
2187 struct hci_conn
*conn
= NULL
, *c
;
2188 int num
= 0, min
= ~0;
2190 /* We don't have to lock device here. Connections are always
2191 * added and removed with TX task disabled. */
2195 list_for_each_entry_rcu(c
, &h
->list
, list
) {
2196 if (c
->type
!= type
|| skb_queue_empty(&c
->data_q
))
2199 if (c
->state
!= BT_CONNECTED
&& c
->state
!= BT_CONFIG
)
2204 if (c
->sent
< min
) {
2209 if (hci_conn_num(hdev
, type
) == num
)
2218 switch (conn
->type
) {
2220 cnt
= hdev
->acl_cnt
;
2224 cnt
= hdev
->sco_cnt
;
2227 cnt
= hdev
->le_mtu
? hdev
->le_cnt
: hdev
->acl_cnt
;
2231 BT_ERR("Unknown link type");
2239 BT_DBG("conn %p quote %d", conn
, *quote
);
2243 static inline void hci_link_tx_to(struct hci_dev
*hdev
, __u8 type
)
2245 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
2248 BT_ERR("%s link tx timeout", hdev
->name
);
2252 /* Kill stalled connections */
2253 list_for_each_entry_rcu(c
, &h
->list
, list
) {
2254 if (c
->type
== type
&& c
->sent
) {
2255 BT_ERR("%s killing stalled connection %s",
2256 hdev
->name
, batostr(&c
->dst
));
2257 hci_acl_disconn(c
, 0x13);
2264 static inline struct hci_chan
*hci_chan_sent(struct hci_dev
*hdev
, __u8 type
,
2267 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
2268 struct hci_chan
*chan
= NULL
;
2269 int num
= 0, min
= ~0, cur_prio
= 0;
2270 struct hci_conn
*conn
;
2271 int cnt
, q
, conn_num
= 0;
2273 BT_DBG("%s", hdev
->name
);
2277 list_for_each_entry_rcu(conn
, &h
->list
, list
) {
2278 struct hci_chan
*tmp
;
2280 if (conn
->type
!= type
)
2283 if (conn
->state
!= BT_CONNECTED
&& conn
->state
!= BT_CONFIG
)
2288 list_for_each_entry_rcu(tmp
, &conn
->chan_list
, list
) {
2289 struct sk_buff
*skb
;
2291 if (skb_queue_empty(&tmp
->data_q
))
2294 skb
= skb_peek(&tmp
->data_q
);
2295 if (skb
->priority
< cur_prio
)
2298 if (skb
->priority
> cur_prio
) {
2301 cur_prio
= skb
->priority
;
2306 if (conn
->sent
< min
) {
2312 if (hci_conn_num(hdev
, type
) == conn_num
)
2321 switch (chan
->conn
->type
) {
2323 cnt
= hdev
->acl_cnt
;
2327 cnt
= hdev
->sco_cnt
;
2330 cnt
= hdev
->le_mtu
? hdev
->le_cnt
: hdev
->acl_cnt
;
2334 BT_ERR("Unknown link type");
2339 BT_DBG("chan %p quote %d", chan
, *quote
);
2343 static void hci_prio_recalculate(struct hci_dev
*hdev
, __u8 type
)
2345 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
2346 struct hci_conn
*conn
;
2349 BT_DBG("%s", hdev
->name
);
2353 list_for_each_entry_rcu(conn
, &h
->list
, list
) {
2354 struct hci_chan
*chan
;
2356 if (conn
->type
!= type
)
2359 if (conn
->state
!= BT_CONNECTED
&& conn
->state
!= BT_CONFIG
)
2364 list_for_each_entry_rcu(chan
, &conn
->chan_list
, list
) {
2365 struct sk_buff
*skb
;
2372 if (skb_queue_empty(&chan
->data_q
))
2375 skb
= skb_peek(&chan
->data_q
);
2376 if (skb
->priority
>= HCI_PRIO_MAX
- 1)
2379 skb
->priority
= HCI_PRIO_MAX
- 1;
2381 BT_DBG("chan %p skb %p promoted to %d", chan
, skb
,
2385 if (hci_conn_num(hdev
, type
) == num
)
2393 static inline int __get_blocks(struct hci_dev
*hdev
, struct sk_buff
*skb
)
2395 /* Calculate count of blocks used by this packet */
2396 return DIV_ROUND_UP(skb
->len
- HCI_ACL_HDR_SIZE
, hdev
->block_len
);
2399 static inline void __check_timeout(struct hci_dev
*hdev
, unsigned int cnt
)
2401 if (!test_bit(HCI_RAW
, &hdev
->flags
)) {
2402 /* ACL tx timeout must be longer than maximum
2403 * link supervision timeout (40.9 seconds) */
2404 if (!cnt
&& time_after(jiffies
, hdev
->acl_last_tx
+
2405 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT
)))
2406 hci_link_tx_to(hdev
, ACL_LINK
);
2410 static inline void hci_sched_acl_pkt(struct hci_dev
*hdev
)
2412 unsigned int cnt
= hdev
->acl_cnt
;
2413 struct hci_chan
*chan
;
2414 struct sk_buff
*skb
;
2417 __check_timeout(hdev
, cnt
);
2419 while (hdev
->acl_cnt
&&
2420 (chan
= hci_chan_sent(hdev
, ACL_LINK
, "e
))) {
2421 u32 priority
= (skb_peek(&chan
->data_q
))->priority
;
2422 while (quote
-- && (skb
= skb_peek(&chan
->data_q
))) {
2423 BT_DBG("chan %p skb %p len %d priority %u", chan
, skb
,
2424 skb
->len
, skb
->priority
);
2426 /* Stop if priority has changed */
2427 if (skb
->priority
< priority
)
2430 skb
= skb_dequeue(&chan
->data_q
);
2432 hci_conn_enter_active_mode(chan
->conn
,
2433 bt_cb(skb
)->force_active
);
2435 hci_send_frame(skb
);
2436 hdev
->acl_last_tx
= jiffies
;
2444 if (cnt
!= hdev
->acl_cnt
)
2445 hci_prio_recalculate(hdev
, ACL_LINK
);
2448 static inline void hci_sched_acl_blk(struct hci_dev
*hdev
)
2450 unsigned int cnt
= hdev
->block_cnt
;
2451 struct hci_chan
*chan
;
2452 struct sk_buff
*skb
;
2455 __check_timeout(hdev
, cnt
);
2457 while (hdev
->block_cnt
> 0 &&
2458 (chan
= hci_chan_sent(hdev
, ACL_LINK
, "e
))) {
2459 u32 priority
= (skb_peek(&chan
->data_q
))->priority
;
2460 while (quote
> 0 && (skb
= skb_peek(&chan
->data_q
))) {
2463 BT_DBG("chan %p skb %p len %d priority %u", chan
, skb
,
2464 skb
->len
, skb
->priority
);
2466 /* Stop if priority has changed */
2467 if (skb
->priority
< priority
)
2470 skb
= skb_dequeue(&chan
->data_q
);
2472 blocks
= __get_blocks(hdev
, skb
);
2473 if (blocks
> hdev
->block_cnt
)
2476 hci_conn_enter_active_mode(chan
->conn
,
2477 bt_cb(skb
)->force_active
);
2479 hci_send_frame(skb
);
2480 hdev
->acl_last_tx
= jiffies
;
2482 hdev
->block_cnt
-= blocks
;
2485 chan
->sent
+= blocks
;
2486 chan
->conn
->sent
+= blocks
;
2490 if (cnt
!= hdev
->block_cnt
)
2491 hci_prio_recalculate(hdev
, ACL_LINK
);
2494 static inline void hci_sched_acl(struct hci_dev
*hdev
)
2496 BT_DBG("%s", hdev
->name
);
2498 if (!hci_conn_num(hdev
, ACL_LINK
))
2501 switch (hdev
->flow_ctl_mode
) {
2502 case HCI_FLOW_CTL_MODE_PACKET_BASED
:
2503 hci_sched_acl_pkt(hdev
);
2506 case HCI_FLOW_CTL_MODE_BLOCK_BASED
:
2507 hci_sched_acl_blk(hdev
);
2513 static inline void hci_sched_sco(struct hci_dev
*hdev
)
2515 struct hci_conn
*conn
;
2516 struct sk_buff
*skb
;
2519 BT_DBG("%s", hdev
->name
);
2521 if (!hci_conn_num(hdev
, SCO_LINK
))
2524 while (hdev
->sco_cnt
&& (conn
= hci_low_sent(hdev
, SCO_LINK
, "e
))) {
2525 while (quote
-- && (skb
= skb_dequeue(&conn
->data_q
))) {
2526 BT_DBG("skb %p len %d", skb
, skb
->len
);
2527 hci_send_frame(skb
);
2530 if (conn
->sent
== ~0)
2536 static inline void hci_sched_esco(struct hci_dev
*hdev
)
2538 struct hci_conn
*conn
;
2539 struct sk_buff
*skb
;
2542 BT_DBG("%s", hdev
->name
);
2544 if (!hci_conn_num(hdev
, ESCO_LINK
))
2547 while (hdev
->sco_cnt
&& (conn
= hci_low_sent(hdev
, ESCO_LINK
, "e
))) {
2548 while (quote
-- && (skb
= skb_dequeue(&conn
->data_q
))) {
2549 BT_DBG("skb %p len %d", skb
, skb
->len
);
2550 hci_send_frame(skb
);
2553 if (conn
->sent
== ~0)
2559 static inline void hci_sched_le(struct hci_dev
*hdev
)
2561 struct hci_chan
*chan
;
2562 struct sk_buff
*skb
;
2563 int quote
, cnt
, tmp
;
2565 BT_DBG("%s", hdev
->name
);
2567 if (!hci_conn_num(hdev
, LE_LINK
))
2570 if (!test_bit(HCI_RAW
, &hdev
->flags
)) {
2571 /* LE tx timeout must be longer than maximum
2572 * link supervision timeout (40.9 seconds) */
2573 if (!hdev
->le_cnt
&& hdev
->le_pkts
&&
2574 time_after(jiffies
, hdev
->le_last_tx
+ HZ
* 45))
2575 hci_link_tx_to(hdev
, LE_LINK
);
2578 cnt
= hdev
->le_pkts
? hdev
->le_cnt
: hdev
->acl_cnt
;
2580 while (cnt
&& (chan
= hci_chan_sent(hdev
, LE_LINK
, "e
))) {
2581 u32 priority
= (skb_peek(&chan
->data_q
))->priority
;
2582 while (quote
-- && (skb
= skb_peek(&chan
->data_q
))) {
2583 BT_DBG("chan %p skb %p len %d priority %u", chan
, skb
,
2584 skb
->len
, skb
->priority
);
2586 /* Stop if priority has changed */
2587 if (skb
->priority
< priority
)
2590 skb
= skb_dequeue(&chan
->data_q
);
2592 hci_send_frame(skb
);
2593 hdev
->le_last_tx
= jiffies
;
2604 hdev
->acl_cnt
= cnt
;
2607 hci_prio_recalculate(hdev
, LE_LINK
);
2610 static void hci_tx_work(struct work_struct
*work
)
2612 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, tx_work
);
2613 struct sk_buff
*skb
;
2615 BT_DBG("%s acl %d sco %d le %d", hdev
->name
, hdev
->acl_cnt
,
2616 hdev
->sco_cnt
, hdev
->le_cnt
);
2618 /* Schedule queues and send stuff to HCI driver */
2620 hci_sched_acl(hdev
);
2622 hci_sched_sco(hdev
);
2624 hci_sched_esco(hdev
);
2628 /* Send next queued raw (unknown type) packet */
2629 while ((skb
= skb_dequeue(&hdev
->raw_q
)))
2630 hci_send_frame(skb
);
2633 /* ----- HCI RX task (incoming data processing) ----- */
2635 /* ACL data packet */
2636 static inline void hci_acldata_packet(struct hci_dev
*hdev
, struct sk_buff
*skb
)
2638 struct hci_acl_hdr
*hdr
= (void *) skb
->data
;
2639 struct hci_conn
*conn
;
2640 __u16 handle
, flags
;
2642 skb_pull(skb
, HCI_ACL_HDR_SIZE
);
2644 handle
= __le16_to_cpu(hdr
->handle
);
2645 flags
= hci_flags(handle
);
2646 handle
= hci_handle(handle
);
2648 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev
->name
, skb
->len
, handle
, flags
);
2650 hdev
->stat
.acl_rx
++;
2653 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
2654 hci_dev_unlock(hdev
);
2657 hci_conn_enter_active_mode(conn
, BT_POWER_FORCE_ACTIVE_OFF
);
2659 /* Send to upper protocol */
2660 l2cap_recv_acldata(conn
, skb
, flags
);
2663 BT_ERR("%s ACL packet for unknown connection handle %d",
2664 hdev
->name
, handle
);
2670 /* SCO data packet */
2671 static inline void hci_scodata_packet(struct hci_dev
*hdev
, struct sk_buff
*skb
)
2673 struct hci_sco_hdr
*hdr
= (void *) skb
->data
;
2674 struct hci_conn
*conn
;
2677 skb_pull(skb
, HCI_SCO_HDR_SIZE
);
2679 handle
= __le16_to_cpu(hdr
->handle
);
2681 BT_DBG("%s len %d handle 0x%x", hdev
->name
, skb
->len
, handle
);
2683 hdev
->stat
.sco_rx
++;
2686 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
2687 hci_dev_unlock(hdev
);
2690 /* Send to upper protocol */
2691 sco_recv_scodata(conn
, skb
);
2694 BT_ERR("%s SCO packet for unknown connection handle %d",
2695 hdev
->name
, handle
);
2701 static void hci_rx_work(struct work_struct
*work
)
2703 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, rx_work
);
2704 struct sk_buff
*skb
;
2706 BT_DBG("%s", hdev
->name
);
2708 while ((skb
= skb_dequeue(&hdev
->rx_q
))) {
2709 if (atomic_read(&hdev
->promisc
)) {
2710 /* Send copy to the sockets */
2711 hci_send_to_sock(hdev
, skb
, NULL
);
2714 if (test_bit(HCI_RAW
, &hdev
->flags
)) {
2719 if (test_bit(HCI_INIT
, &hdev
->flags
)) {
2720 /* Don't process data packets in this states. */
2721 switch (bt_cb(skb
)->pkt_type
) {
2722 case HCI_ACLDATA_PKT
:
2723 case HCI_SCODATA_PKT
:
2730 switch (bt_cb(skb
)->pkt_type
) {
2732 BT_DBG("%s Event packet", hdev
->name
);
2733 hci_event_packet(hdev
, skb
);
2736 case HCI_ACLDATA_PKT
:
2737 BT_DBG("%s ACL data packet", hdev
->name
);
2738 hci_acldata_packet(hdev
, skb
);
2741 case HCI_SCODATA_PKT
:
2742 BT_DBG("%s SCO data packet", hdev
->name
);
2743 hci_scodata_packet(hdev
, skb
);
2753 static void hci_cmd_work(struct work_struct
*work
)
2755 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, cmd_work
);
2756 struct sk_buff
*skb
;
2758 BT_DBG("%s cmd %d", hdev
->name
, atomic_read(&hdev
->cmd_cnt
));
2760 /* Send queued commands */
2761 if (atomic_read(&hdev
->cmd_cnt
)) {
2762 skb
= skb_dequeue(&hdev
->cmd_q
);
2766 kfree_skb(hdev
->sent_cmd
);
2768 hdev
->sent_cmd
= skb_clone(skb
, GFP_ATOMIC
);
2769 if (hdev
->sent_cmd
) {
2770 atomic_dec(&hdev
->cmd_cnt
);
2771 hci_send_frame(skb
);
2772 if (test_bit(HCI_RESET
, &hdev
->flags
))
2773 del_timer(&hdev
->cmd_timer
);
2775 mod_timer(&hdev
->cmd_timer
,
2776 jiffies
+ msecs_to_jiffies(HCI_CMD_TIMEOUT
));
2778 skb_queue_head(&hdev
->cmd_q
, skb
);
2779 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
2784 int hci_do_inquiry(struct hci_dev
*hdev
, u8 length
)
2786 /* General inquiry access code (GIAC) */
2787 u8 lap
[3] = { 0x33, 0x8b, 0x9e };
2788 struct hci_cp_inquiry cp
;
2790 BT_DBG("%s", hdev
->name
);
2792 if (test_bit(HCI_INQUIRY
, &hdev
->flags
))
2793 return -EINPROGRESS
;
2795 inquiry_cache_flush(hdev
);
2797 memset(&cp
, 0, sizeof(cp
));
2798 memcpy(&cp
.lap
, lap
, sizeof(cp
.lap
));
2801 return hci_send_cmd(hdev
, HCI_OP_INQUIRY
, sizeof(cp
), &cp
);
2804 int hci_cancel_inquiry(struct hci_dev
*hdev
)
2806 BT_DBG("%s", hdev
->name
);
2808 if (!test_bit(HCI_INQUIRY
, &hdev
->flags
))
2811 return hci_send_cmd(hdev
, HCI_OP_INQUIRY_CANCEL
, 0, NULL
);
2814 module_param(enable_hs
, bool, 0644);
2815 MODULE_PARM_DESC(enable_hs
, "Enable High Speed");