/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>
/* Work handlers defined later in this file. */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

/* States of a pending synchronous HCI request. */
#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

/* Serialize synchronous requests against a single controller. */
#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)
/* ---- HCI notifications ---- */

/* Forward a device event (register/unregister, up/down, ...) to the
 * HCI socket layer so that monitoring sockets see it.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
73 /* ---- HCI debugfs entries ---- */
75 static ssize_t
dut_mode_read(struct file
*file
, char __user
*user_buf
,
76 size_t count
, loff_t
*ppos
)
78 struct hci_dev
*hdev
= file
->private_data
;
81 buf
[0] = test_bit(HCI_DUT_MODE
, &hdev
->dbg_flags
) ? 'Y': 'N';
84 return simple_read_from_buffer(user_buf
, count
, ppos
, buf
, 2);
87 static ssize_t
dut_mode_write(struct file
*file
, const char __user
*user_buf
,
88 size_t count
, loff_t
*ppos
)
90 struct hci_dev
*hdev
= file
->private_data
;
93 size_t buf_size
= min(count
, (sizeof(buf
)-1));
97 if (!test_bit(HCI_UP
, &hdev
->flags
))
100 if (copy_from_user(buf
, user_buf
, buf_size
))
103 buf
[buf_size
] = '\0';
104 if (strtobool(buf
, &enable
))
107 if (enable
== test_bit(HCI_DUT_MODE
, &hdev
->dbg_flags
))
112 skb
= __hci_cmd_sync(hdev
, HCI_OP_ENABLE_DUT_MODE
, 0, NULL
,
115 skb
= __hci_cmd_sync(hdev
, HCI_OP_RESET
, 0, NULL
,
117 hci_req_unlock(hdev
);
122 err
= -bt_to_errno(skb
->data
[0]);
128 change_bit(HCI_DUT_MODE
, &hdev
->dbg_flags
);
133 static const struct file_operations dut_mode_fops
= {
135 .read
= dut_mode_read
,
136 .write
= dut_mode_write
,
137 .llseek
= default_llseek
,
140 static int features_show(struct seq_file
*f
, void *ptr
)
142 struct hci_dev
*hdev
= f
->private;
146 for (p
= 0; p
< HCI_MAX_PAGES
&& p
<= hdev
->max_page
; p
++) {
147 seq_printf(f
, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
148 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p
,
149 hdev
->features
[p
][0], hdev
->features
[p
][1],
150 hdev
->features
[p
][2], hdev
->features
[p
][3],
151 hdev
->features
[p
][4], hdev
->features
[p
][5],
152 hdev
->features
[p
][6], hdev
->features
[p
][7]);
154 if (lmp_le_capable(hdev
))
155 seq_printf(f
, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
156 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
157 hdev
->le_features
[0], hdev
->le_features
[1],
158 hdev
->le_features
[2], hdev
->le_features
[3],
159 hdev
->le_features
[4], hdev
->le_features
[5],
160 hdev
->le_features
[6], hdev
->le_features
[7]);
161 hci_dev_unlock(hdev
);
166 static int features_open(struct inode
*inode
, struct file
*file
)
168 return single_open(file
, features_show
, inode
->i_private
);
171 static const struct file_operations features_fops
= {
172 .open
= features_open
,
175 .release
= single_release
,
178 static int blacklist_show(struct seq_file
*f
, void *p
)
180 struct hci_dev
*hdev
= f
->private;
181 struct bdaddr_list
*b
;
184 list_for_each_entry(b
, &hdev
->blacklist
, list
)
185 seq_printf(f
, "%pMR (type %u)\n", &b
->bdaddr
, b
->bdaddr_type
);
186 hci_dev_unlock(hdev
);
191 static int blacklist_open(struct inode
*inode
, struct file
*file
)
193 return single_open(file
, blacklist_show
, inode
->i_private
);
196 static const struct file_operations blacklist_fops
= {
197 .open
= blacklist_open
,
200 .release
= single_release
,
203 static int uuids_show(struct seq_file
*f
, void *p
)
205 struct hci_dev
*hdev
= f
->private;
206 struct bt_uuid
*uuid
;
209 list_for_each_entry(uuid
, &hdev
->uuids
, list
) {
212 /* The Bluetooth UUID values are stored in big endian,
213 * but with reversed byte order. So convert them into
214 * the right order for the %pUb modifier.
216 for (i
= 0; i
< 16; i
++)
217 val
[i
] = uuid
->uuid
[15 - i
];
219 seq_printf(f
, "%pUb\n", val
);
221 hci_dev_unlock(hdev
);
226 static int uuids_open(struct inode
*inode
, struct file
*file
)
228 return single_open(file
, uuids_show
, inode
->i_private
);
231 static const struct file_operations uuids_fops
= {
235 .release
= single_release
,
238 static int inquiry_cache_show(struct seq_file
*f
, void *p
)
240 struct hci_dev
*hdev
= f
->private;
241 struct discovery_state
*cache
= &hdev
->discovery
;
242 struct inquiry_entry
*e
;
246 list_for_each_entry(e
, &cache
->all
, all
) {
247 struct inquiry_data
*data
= &e
->data
;
248 seq_printf(f
, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
250 data
->pscan_rep_mode
, data
->pscan_period_mode
,
251 data
->pscan_mode
, data
->dev_class
[2],
252 data
->dev_class
[1], data
->dev_class
[0],
253 __le16_to_cpu(data
->clock_offset
),
254 data
->rssi
, data
->ssp_mode
, e
->timestamp
);
257 hci_dev_unlock(hdev
);
262 static int inquiry_cache_open(struct inode
*inode
, struct file
*file
)
264 return single_open(file
, inquiry_cache_show
, inode
->i_private
);
267 static const struct file_operations inquiry_cache_fops
= {
268 .open
= inquiry_cache_open
,
271 .release
= single_release
,
274 static int link_keys_show(struct seq_file
*f
, void *ptr
)
276 struct hci_dev
*hdev
= f
->private;
277 struct list_head
*p
, *n
;
280 list_for_each_safe(p
, n
, &hdev
->link_keys
) {
281 struct link_key
*key
= list_entry(p
, struct link_key
, list
);
282 seq_printf(f
, "%pMR %u %*phN %u\n", &key
->bdaddr
, key
->type
,
283 HCI_LINK_KEY_SIZE
, key
->val
, key
->pin_len
);
285 hci_dev_unlock(hdev
);
290 static int link_keys_open(struct inode
*inode
, struct file
*file
)
292 return single_open(file
, link_keys_show
, inode
->i_private
);
295 static const struct file_operations link_keys_fops
= {
296 .open
= link_keys_open
,
299 .release
= single_release
,
302 static int dev_class_show(struct seq_file
*f
, void *ptr
)
304 struct hci_dev
*hdev
= f
->private;
307 seq_printf(f
, "0x%.2x%.2x%.2x\n", hdev
->dev_class
[2],
308 hdev
->dev_class
[1], hdev
->dev_class
[0]);
309 hci_dev_unlock(hdev
);
314 static int dev_class_open(struct inode
*inode
, struct file
*file
)
316 return single_open(file
, dev_class_show
, inode
->i_private
);
319 static const struct file_operations dev_class_fops
= {
320 .open
= dev_class_open
,
323 .release
= single_release
,
326 static int voice_setting_get(void *data
, u64
*val
)
328 struct hci_dev
*hdev
= data
;
331 *val
= hdev
->voice_setting
;
332 hci_dev_unlock(hdev
);
337 DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops
, voice_setting_get
,
338 NULL
, "0x%4.4llx\n");
340 static int auto_accept_delay_set(void *data
, u64 val
)
342 struct hci_dev
*hdev
= data
;
345 hdev
->auto_accept_delay
= val
;
346 hci_dev_unlock(hdev
);
351 static int auto_accept_delay_get(void *data
, u64
*val
)
353 struct hci_dev
*hdev
= data
;
356 *val
= hdev
->auto_accept_delay
;
357 hci_dev_unlock(hdev
);
362 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops
, auto_accept_delay_get
,
363 auto_accept_delay_set
, "%llu\n");
365 static ssize_t
force_sc_support_read(struct file
*file
, char __user
*user_buf
,
366 size_t count
, loff_t
*ppos
)
368 struct hci_dev
*hdev
= file
->private_data
;
371 buf
[0] = test_bit(HCI_FORCE_SC
, &hdev
->dbg_flags
) ? 'Y': 'N';
374 return simple_read_from_buffer(user_buf
, count
, ppos
, buf
, 2);
377 static ssize_t
force_sc_support_write(struct file
*file
,
378 const char __user
*user_buf
,
379 size_t count
, loff_t
*ppos
)
381 struct hci_dev
*hdev
= file
->private_data
;
383 size_t buf_size
= min(count
, (sizeof(buf
)-1));
386 if (test_bit(HCI_UP
, &hdev
->flags
))
389 if (copy_from_user(buf
, user_buf
, buf_size
))
392 buf
[buf_size
] = '\0';
393 if (strtobool(buf
, &enable
))
396 if (enable
== test_bit(HCI_FORCE_SC
, &hdev
->dbg_flags
))
399 change_bit(HCI_FORCE_SC
, &hdev
->dbg_flags
);
404 static const struct file_operations force_sc_support_fops
= {
406 .read
= force_sc_support_read
,
407 .write
= force_sc_support_write
,
408 .llseek
= default_llseek
,
411 static ssize_t
sc_only_mode_read(struct file
*file
, char __user
*user_buf
,
412 size_t count
, loff_t
*ppos
)
414 struct hci_dev
*hdev
= file
->private_data
;
417 buf
[0] = test_bit(HCI_SC_ONLY
, &hdev
->dev_flags
) ? 'Y': 'N';
420 return simple_read_from_buffer(user_buf
, count
, ppos
, buf
, 2);
423 static const struct file_operations sc_only_mode_fops
= {
425 .read
= sc_only_mode_read
,
426 .llseek
= default_llseek
,
429 static int idle_timeout_set(void *data
, u64 val
)
431 struct hci_dev
*hdev
= data
;
433 if (val
!= 0 && (val
< 500 || val
> 3600000))
437 hdev
->idle_timeout
= val
;
438 hci_dev_unlock(hdev
);
443 static int idle_timeout_get(void *data
, u64
*val
)
445 struct hci_dev
*hdev
= data
;
448 *val
= hdev
->idle_timeout
;
449 hci_dev_unlock(hdev
);
454 DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops
, idle_timeout_get
,
455 idle_timeout_set
, "%llu\n");
457 static int rpa_timeout_set(void *data
, u64 val
)
459 struct hci_dev
*hdev
= data
;
461 /* Require the RPA timeout to be at least 30 seconds and at most
464 if (val
< 30 || val
> (60 * 60 * 24))
468 hdev
->rpa_timeout
= val
;
469 hci_dev_unlock(hdev
);
474 static int rpa_timeout_get(void *data
, u64
*val
)
476 struct hci_dev
*hdev
= data
;
479 *val
= hdev
->rpa_timeout
;
480 hci_dev_unlock(hdev
);
485 DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops
, rpa_timeout_get
,
486 rpa_timeout_set
, "%llu\n");
488 static int sniff_min_interval_set(void *data
, u64 val
)
490 struct hci_dev
*hdev
= data
;
492 if (val
== 0 || val
% 2 || val
> hdev
->sniff_max_interval
)
496 hdev
->sniff_min_interval
= val
;
497 hci_dev_unlock(hdev
);
502 static int sniff_min_interval_get(void *data
, u64
*val
)
504 struct hci_dev
*hdev
= data
;
507 *val
= hdev
->sniff_min_interval
;
508 hci_dev_unlock(hdev
);
513 DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops
, sniff_min_interval_get
,
514 sniff_min_interval_set
, "%llu\n");
516 static int sniff_max_interval_set(void *data
, u64 val
)
518 struct hci_dev
*hdev
= data
;
520 if (val
== 0 || val
% 2 || val
< hdev
->sniff_min_interval
)
524 hdev
->sniff_max_interval
= val
;
525 hci_dev_unlock(hdev
);
530 static int sniff_max_interval_get(void *data
, u64
*val
)
532 struct hci_dev
*hdev
= data
;
535 *val
= hdev
->sniff_max_interval
;
536 hci_dev_unlock(hdev
);
541 DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops
, sniff_max_interval_get
,
542 sniff_max_interval_set
, "%llu\n");
544 static int conn_info_min_age_set(void *data
, u64 val
)
546 struct hci_dev
*hdev
= data
;
548 if (val
== 0 || val
> hdev
->conn_info_max_age
)
552 hdev
->conn_info_min_age
= val
;
553 hci_dev_unlock(hdev
);
558 static int conn_info_min_age_get(void *data
, u64
*val
)
560 struct hci_dev
*hdev
= data
;
563 *val
= hdev
->conn_info_min_age
;
564 hci_dev_unlock(hdev
);
569 DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops
, conn_info_min_age_get
,
570 conn_info_min_age_set
, "%llu\n");
572 static int conn_info_max_age_set(void *data
, u64 val
)
574 struct hci_dev
*hdev
= data
;
576 if (val
== 0 || val
< hdev
->conn_info_min_age
)
580 hdev
->conn_info_max_age
= val
;
581 hci_dev_unlock(hdev
);
586 static int conn_info_max_age_get(void *data
, u64
*val
)
588 struct hci_dev
*hdev
= data
;
591 *val
= hdev
->conn_info_max_age
;
592 hci_dev_unlock(hdev
);
597 DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops
, conn_info_max_age_get
,
598 conn_info_max_age_set
, "%llu\n");
600 static int identity_show(struct seq_file
*f
, void *p
)
602 struct hci_dev
*hdev
= f
->private;
608 hci_copy_identity_address(hdev
, &addr
, &addr_type
);
610 seq_printf(f
, "%pMR (type %u) %*phN %pMR\n", &addr
, addr_type
,
611 16, hdev
->irk
, &hdev
->rpa
);
613 hci_dev_unlock(hdev
);
618 static int identity_open(struct inode
*inode
, struct file
*file
)
620 return single_open(file
, identity_show
, inode
->i_private
);
623 static const struct file_operations identity_fops
= {
624 .open
= identity_open
,
627 .release
= single_release
,
630 static int random_address_show(struct seq_file
*f
, void *p
)
632 struct hci_dev
*hdev
= f
->private;
635 seq_printf(f
, "%pMR\n", &hdev
->random_addr
);
636 hci_dev_unlock(hdev
);
641 static int random_address_open(struct inode
*inode
, struct file
*file
)
643 return single_open(file
, random_address_show
, inode
->i_private
);
646 static const struct file_operations random_address_fops
= {
647 .open
= random_address_open
,
650 .release
= single_release
,
653 static int static_address_show(struct seq_file
*f
, void *p
)
655 struct hci_dev
*hdev
= f
->private;
658 seq_printf(f
, "%pMR\n", &hdev
->static_addr
);
659 hci_dev_unlock(hdev
);
664 static int static_address_open(struct inode
*inode
, struct file
*file
)
666 return single_open(file
, static_address_show
, inode
->i_private
);
669 static const struct file_operations static_address_fops
= {
670 .open
= static_address_open
,
673 .release
= single_release
,
676 static ssize_t
force_static_address_read(struct file
*file
,
677 char __user
*user_buf
,
678 size_t count
, loff_t
*ppos
)
680 struct hci_dev
*hdev
= file
->private_data
;
683 buf
[0] = test_bit(HCI_FORCE_STATIC_ADDR
, &hdev
->dbg_flags
) ? 'Y': 'N';
686 return simple_read_from_buffer(user_buf
, count
, ppos
, buf
, 2);
689 static ssize_t
force_static_address_write(struct file
*file
,
690 const char __user
*user_buf
,
691 size_t count
, loff_t
*ppos
)
693 struct hci_dev
*hdev
= file
->private_data
;
695 size_t buf_size
= min(count
, (sizeof(buf
)-1));
698 if (test_bit(HCI_UP
, &hdev
->flags
))
701 if (copy_from_user(buf
, user_buf
, buf_size
))
704 buf
[buf_size
] = '\0';
705 if (strtobool(buf
, &enable
))
708 if (enable
== test_bit(HCI_FORCE_STATIC_ADDR
, &hdev
->dbg_flags
))
711 change_bit(HCI_FORCE_STATIC_ADDR
, &hdev
->dbg_flags
);
716 static const struct file_operations force_static_address_fops
= {
718 .read
= force_static_address_read
,
719 .write
= force_static_address_write
,
720 .llseek
= default_llseek
,
723 static int white_list_show(struct seq_file
*f
, void *ptr
)
725 struct hci_dev
*hdev
= f
->private;
726 struct bdaddr_list
*b
;
729 list_for_each_entry(b
, &hdev
->le_white_list
, list
)
730 seq_printf(f
, "%pMR (type %u)\n", &b
->bdaddr
, b
->bdaddr_type
);
731 hci_dev_unlock(hdev
);
736 static int white_list_open(struct inode
*inode
, struct file
*file
)
738 return single_open(file
, white_list_show
, inode
->i_private
);
741 static const struct file_operations white_list_fops
= {
742 .open
= white_list_open
,
745 .release
= single_release
,
748 static int identity_resolving_keys_show(struct seq_file
*f
, void *ptr
)
750 struct hci_dev
*hdev
= f
->private;
754 list_for_each_entry_rcu(irk
, &hdev
->identity_resolving_keys
, list
) {
755 seq_printf(f
, "%pMR (type %u) %*phN %pMR\n",
756 &irk
->bdaddr
, irk
->addr_type
,
757 16, irk
->val
, &irk
->rpa
);
764 static int identity_resolving_keys_open(struct inode
*inode
, struct file
*file
)
766 return single_open(file
, identity_resolving_keys_show
,
770 static const struct file_operations identity_resolving_keys_fops
= {
771 .open
= identity_resolving_keys_open
,
774 .release
= single_release
,
777 static int long_term_keys_show(struct seq_file
*f
, void *ptr
)
779 struct hci_dev
*hdev
= f
->private;
783 list_for_each_entry_rcu(ltk
, &hdev
->long_term_keys
, list
)
784 seq_printf(f
, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
785 <k
->bdaddr
, ltk
->bdaddr_type
, ltk
->authenticated
,
786 ltk
->type
, ltk
->enc_size
, __le16_to_cpu(ltk
->ediv
),
787 __le64_to_cpu(ltk
->rand
), 16, ltk
->val
);
793 static int long_term_keys_open(struct inode
*inode
, struct file
*file
)
795 return single_open(file
, long_term_keys_show
, inode
->i_private
);
798 static const struct file_operations long_term_keys_fops
= {
799 .open
= long_term_keys_open
,
802 .release
= single_release
,
805 static int conn_min_interval_set(void *data
, u64 val
)
807 struct hci_dev
*hdev
= data
;
809 if (val
< 0x0006 || val
> 0x0c80 || val
> hdev
->le_conn_max_interval
)
813 hdev
->le_conn_min_interval
= val
;
814 hci_dev_unlock(hdev
);
819 static int conn_min_interval_get(void *data
, u64
*val
)
821 struct hci_dev
*hdev
= data
;
824 *val
= hdev
->le_conn_min_interval
;
825 hci_dev_unlock(hdev
);
830 DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops
, conn_min_interval_get
,
831 conn_min_interval_set
, "%llu\n");
833 static int conn_max_interval_set(void *data
, u64 val
)
835 struct hci_dev
*hdev
= data
;
837 if (val
< 0x0006 || val
> 0x0c80 || val
< hdev
->le_conn_min_interval
)
841 hdev
->le_conn_max_interval
= val
;
842 hci_dev_unlock(hdev
);
847 static int conn_max_interval_get(void *data
, u64
*val
)
849 struct hci_dev
*hdev
= data
;
852 *val
= hdev
->le_conn_max_interval
;
853 hci_dev_unlock(hdev
);
858 DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops
, conn_max_interval_get
,
859 conn_max_interval_set
, "%llu\n");
861 static int conn_latency_set(void *data
, u64 val
)
863 struct hci_dev
*hdev
= data
;
869 hdev
->le_conn_latency
= val
;
870 hci_dev_unlock(hdev
);
875 static int conn_latency_get(void *data
, u64
*val
)
877 struct hci_dev
*hdev
= data
;
880 *val
= hdev
->le_conn_latency
;
881 hci_dev_unlock(hdev
);
886 DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops
, conn_latency_get
,
887 conn_latency_set
, "%llu\n");
889 static int supervision_timeout_set(void *data
, u64 val
)
891 struct hci_dev
*hdev
= data
;
893 if (val
< 0x000a || val
> 0x0c80)
897 hdev
->le_supv_timeout
= val
;
898 hci_dev_unlock(hdev
);
903 static int supervision_timeout_get(void *data
, u64
*val
)
905 struct hci_dev
*hdev
= data
;
908 *val
= hdev
->le_supv_timeout
;
909 hci_dev_unlock(hdev
);
914 DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops
, supervision_timeout_get
,
915 supervision_timeout_set
, "%llu\n");
917 static int adv_channel_map_set(void *data
, u64 val
)
919 struct hci_dev
*hdev
= data
;
921 if (val
< 0x01 || val
> 0x07)
925 hdev
->le_adv_channel_map
= val
;
926 hci_dev_unlock(hdev
);
931 static int adv_channel_map_get(void *data
, u64
*val
)
933 struct hci_dev
*hdev
= data
;
936 *val
= hdev
->le_adv_channel_map
;
937 hci_dev_unlock(hdev
);
942 DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops
, adv_channel_map_get
,
943 adv_channel_map_set
, "%llu\n");
945 static int adv_min_interval_set(void *data
, u64 val
)
947 struct hci_dev
*hdev
= data
;
949 if (val
< 0x0020 || val
> 0x4000 || val
> hdev
->le_adv_max_interval
)
953 hdev
->le_adv_min_interval
= val
;
954 hci_dev_unlock(hdev
);
959 static int adv_min_interval_get(void *data
, u64
*val
)
961 struct hci_dev
*hdev
= data
;
964 *val
= hdev
->le_adv_min_interval
;
965 hci_dev_unlock(hdev
);
970 DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops
, adv_min_interval_get
,
971 adv_min_interval_set
, "%llu\n");
973 static int adv_max_interval_set(void *data
, u64 val
)
975 struct hci_dev
*hdev
= data
;
977 if (val
< 0x0020 || val
> 0x4000 || val
< hdev
->le_adv_min_interval
)
981 hdev
->le_adv_max_interval
= val
;
982 hci_dev_unlock(hdev
);
987 static int adv_max_interval_get(void *data
, u64
*val
)
989 struct hci_dev
*hdev
= data
;
992 *val
= hdev
->le_adv_max_interval
;
993 hci_dev_unlock(hdev
);
998 DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops
, adv_max_interval_get
,
999 adv_max_interval_set
, "%llu\n");
1001 static int device_list_show(struct seq_file
*f
, void *ptr
)
1003 struct hci_dev
*hdev
= f
->private;
1004 struct hci_conn_params
*p
;
1005 struct bdaddr_list
*b
;
1008 list_for_each_entry(b
, &hdev
->whitelist
, list
)
1009 seq_printf(f
, "%pMR (type %u)\n", &b
->bdaddr
, b
->bdaddr_type
);
1010 list_for_each_entry(p
, &hdev
->le_conn_params
, list
) {
1011 seq_printf(f
, "%pMR (type %u) %u\n", &p
->addr
, p
->addr_type
,
1014 hci_dev_unlock(hdev
);
1019 static int device_list_open(struct inode
*inode
, struct file
*file
)
1021 return single_open(file
, device_list_show
, inode
->i_private
);
1024 static const struct file_operations device_list_fops
= {
1025 .open
= device_list_open
,
1027 .llseek
= seq_lseek
,
1028 .release
= single_release
,
1031 /* ---- HCI requests ---- */
1033 static void hci_req_sync_complete(struct hci_dev
*hdev
, u8 result
)
1035 BT_DBG("%s result 0x%2.2x", hdev
->name
, result
);
1037 if (hdev
->req_status
== HCI_REQ_PEND
) {
1038 hdev
->req_result
= result
;
1039 hdev
->req_status
= HCI_REQ_DONE
;
1040 wake_up_interruptible(&hdev
->req_wait_q
);
1044 static void hci_req_cancel(struct hci_dev
*hdev
, int err
)
1046 BT_DBG("%s err 0x%2.2x", hdev
->name
, err
);
1048 if (hdev
->req_status
== HCI_REQ_PEND
) {
1049 hdev
->req_result
= err
;
1050 hdev
->req_status
= HCI_REQ_CANCELED
;
1051 wake_up_interruptible(&hdev
->req_wait_q
);
1055 static struct sk_buff
*hci_get_cmd_complete(struct hci_dev
*hdev
, u16 opcode
,
1058 struct hci_ev_cmd_complete
*ev
;
1059 struct hci_event_hdr
*hdr
;
1060 struct sk_buff
*skb
;
1064 skb
= hdev
->recv_evt
;
1065 hdev
->recv_evt
= NULL
;
1067 hci_dev_unlock(hdev
);
1070 return ERR_PTR(-ENODATA
);
1072 if (skb
->len
< sizeof(*hdr
)) {
1073 BT_ERR("Too short HCI event");
1077 hdr
= (void *) skb
->data
;
1078 skb_pull(skb
, HCI_EVENT_HDR_SIZE
);
1081 if (hdr
->evt
!= event
)
1086 if (hdr
->evt
!= HCI_EV_CMD_COMPLETE
) {
1087 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr
->evt
);
1091 if (skb
->len
< sizeof(*ev
)) {
1092 BT_ERR("Too short cmd_complete event");
1096 ev
= (void *) skb
->data
;
1097 skb_pull(skb
, sizeof(*ev
));
1099 if (opcode
== __le16_to_cpu(ev
->opcode
))
1102 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode
,
1103 __le16_to_cpu(ev
->opcode
));
1107 return ERR_PTR(-ENODATA
);
1110 struct sk_buff
*__hci_cmd_sync_ev(struct hci_dev
*hdev
, u16 opcode
, u32 plen
,
1111 const void *param
, u8 event
, u32 timeout
)
1113 DECLARE_WAITQUEUE(wait
, current
);
1114 struct hci_request req
;
1117 BT_DBG("%s", hdev
->name
);
1119 hci_req_init(&req
, hdev
);
1121 hci_req_add_ev(&req
, opcode
, plen
, param
, event
);
1123 hdev
->req_status
= HCI_REQ_PEND
;
1125 add_wait_queue(&hdev
->req_wait_q
, &wait
);
1126 set_current_state(TASK_INTERRUPTIBLE
);
1128 err
= hci_req_run(&req
, hci_req_sync_complete
);
1130 remove_wait_queue(&hdev
->req_wait_q
, &wait
);
1131 return ERR_PTR(err
);
1134 schedule_timeout(timeout
);
1136 remove_wait_queue(&hdev
->req_wait_q
, &wait
);
1138 if (signal_pending(current
))
1139 return ERR_PTR(-EINTR
);
1141 switch (hdev
->req_status
) {
1143 err
= -bt_to_errno(hdev
->req_result
);
1146 case HCI_REQ_CANCELED
:
1147 err
= -hdev
->req_result
;
1155 hdev
->req_status
= hdev
->req_result
= 0;
1157 BT_DBG("%s end: err %d", hdev
->name
, err
);
1160 return ERR_PTR(err
);
1162 return hci_get_cmd_complete(hdev
, opcode
, event
);
1164 EXPORT_SYMBOL(__hci_cmd_sync_ev
);
1166 struct sk_buff
*__hci_cmd_sync(struct hci_dev
*hdev
, u16 opcode
, u32 plen
,
1167 const void *param
, u32 timeout
)
1169 return __hci_cmd_sync_ev(hdev
, opcode
, plen
, param
, 0, timeout
);
1171 EXPORT_SYMBOL(__hci_cmd_sync
);
1173 /* Execute request and wait for completion. */
1174 static int __hci_req_sync(struct hci_dev
*hdev
,
1175 void (*func
)(struct hci_request
*req
,
1177 unsigned long opt
, __u32 timeout
)
1179 struct hci_request req
;
1180 DECLARE_WAITQUEUE(wait
, current
);
1183 BT_DBG("%s start", hdev
->name
);
1185 hci_req_init(&req
, hdev
);
1187 hdev
->req_status
= HCI_REQ_PEND
;
1191 add_wait_queue(&hdev
->req_wait_q
, &wait
);
1192 set_current_state(TASK_INTERRUPTIBLE
);
1194 err
= hci_req_run(&req
, hci_req_sync_complete
);
1196 hdev
->req_status
= 0;
1198 remove_wait_queue(&hdev
->req_wait_q
, &wait
);
1200 /* ENODATA means the HCI request command queue is empty.
1201 * This can happen when a request with conditionals doesn't
1202 * trigger any commands to be sent. This is normal behavior
1203 * and should not trigger an error return.
1205 if (err
== -ENODATA
)
1211 schedule_timeout(timeout
);
1213 remove_wait_queue(&hdev
->req_wait_q
, &wait
);
1215 if (signal_pending(current
))
1218 switch (hdev
->req_status
) {
1220 err
= -bt_to_errno(hdev
->req_result
);
1223 case HCI_REQ_CANCELED
:
1224 err
= -hdev
->req_result
;
1232 hdev
->req_status
= hdev
->req_result
= 0;
1234 BT_DBG("%s end: err %d", hdev
->name
, err
);
1239 static int hci_req_sync(struct hci_dev
*hdev
,
1240 void (*req
)(struct hci_request
*req
,
1242 unsigned long opt
, __u32 timeout
)
1246 if (!test_bit(HCI_UP
, &hdev
->flags
))
1249 /* Serialize all requests */
1251 ret
= __hci_req_sync(hdev
, req
, opt
, timeout
);
1252 hci_req_unlock(hdev
);
1257 static void hci_reset_req(struct hci_request
*req
, unsigned long opt
)
1259 BT_DBG("%s %ld", req
->hdev
->name
, opt
);
1262 set_bit(HCI_RESET
, &req
->hdev
->flags
);
1263 hci_req_add(req
, HCI_OP_RESET
, 0, NULL
);
1266 static void bredr_init(struct hci_request
*req
)
1268 req
->hdev
->flow_ctl_mode
= HCI_FLOW_CTL_MODE_PACKET_BASED
;
1270 /* Read Local Supported Features */
1271 hci_req_add(req
, HCI_OP_READ_LOCAL_FEATURES
, 0, NULL
);
1273 /* Read Local Version */
1274 hci_req_add(req
, HCI_OP_READ_LOCAL_VERSION
, 0, NULL
);
1276 /* Read BD Address */
1277 hci_req_add(req
, HCI_OP_READ_BD_ADDR
, 0, NULL
);
1280 static void amp_init(struct hci_request
*req
)
1282 req
->hdev
->flow_ctl_mode
= HCI_FLOW_CTL_MODE_BLOCK_BASED
;
1284 /* Read Local Version */
1285 hci_req_add(req
, HCI_OP_READ_LOCAL_VERSION
, 0, NULL
);
1287 /* Read Local Supported Commands */
1288 hci_req_add(req
, HCI_OP_READ_LOCAL_COMMANDS
, 0, NULL
);
1290 /* Read Local Supported Features */
1291 hci_req_add(req
, HCI_OP_READ_LOCAL_FEATURES
, 0, NULL
);
1293 /* Read Local AMP Info */
1294 hci_req_add(req
, HCI_OP_READ_LOCAL_AMP_INFO
, 0, NULL
);
1296 /* Read Data Blk size */
1297 hci_req_add(req
, HCI_OP_READ_DATA_BLOCK_SIZE
, 0, NULL
);
1299 /* Read Flow Control Mode */
1300 hci_req_add(req
, HCI_OP_READ_FLOW_CONTROL_MODE
, 0, NULL
);
1302 /* Read Location Data */
1303 hci_req_add(req
, HCI_OP_READ_LOCATION_DATA
, 0, NULL
);
1306 static void hci_init1_req(struct hci_request
*req
, unsigned long opt
)
1308 struct hci_dev
*hdev
= req
->hdev
;
1310 BT_DBG("%s %ld", hdev
->name
, opt
);
1313 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE
, &hdev
->quirks
))
1314 hci_reset_req(req
, 0);
1316 switch (hdev
->dev_type
) {
1326 BT_ERR("Unknown device type %d", hdev
->dev_type
);
1331 static void bredr_setup(struct hci_request
*req
)
1333 struct hci_dev
*hdev
= req
->hdev
;
1338 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
1339 hci_req_add(req
, HCI_OP_READ_BUFFER_SIZE
, 0, NULL
);
1341 /* Read Class of Device */
1342 hci_req_add(req
, HCI_OP_READ_CLASS_OF_DEV
, 0, NULL
);
1344 /* Read Local Name */
1345 hci_req_add(req
, HCI_OP_READ_LOCAL_NAME
, 0, NULL
);
1347 /* Read Voice Setting */
1348 hci_req_add(req
, HCI_OP_READ_VOICE_SETTING
, 0, NULL
);
1350 /* Read Number of Supported IAC */
1351 hci_req_add(req
, HCI_OP_READ_NUM_SUPPORTED_IAC
, 0, NULL
);
1353 /* Read Current IAC LAP */
1354 hci_req_add(req
, HCI_OP_READ_CURRENT_IAC_LAP
, 0, NULL
);
1356 /* Clear Event Filters */
1357 flt_type
= HCI_FLT_CLEAR_ALL
;
1358 hci_req_add(req
, HCI_OP_SET_EVENT_FLT
, 1, &flt_type
);
1360 /* Connection accept timeout ~20 secs */
1361 param
= cpu_to_le16(0x7d00);
1362 hci_req_add(req
, HCI_OP_WRITE_CA_TIMEOUT
, 2, ¶m
);
1364 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1365 * but it does not support page scan related HCI commands.
1367 if (hdev
->manufacturer
!= 31 && hdev
->hci_ver
> BLUETOOTH_VER_1_1
) {
1368 hci_req_add(req
, HCI_OP_READ_PAGE_SCAN_ACTIVITY
, 0, NULL
);
1369 hci_req_add(req
, HCI_OP_READ_PAGE_SCAN_TYPE
, 0, NULL
);
1373 static void le_setup(struct hci_request
*req
)
1375 struct hci_dev
*hdev
= req
->hdev
;
1377 /* Read LE Buffer Size */
1378 hci_req_add(req
, HCI_OP_LE_READ_BUFFER_SIZE
, 0, NULL
);
1380 /* Read LE Local Supported Features */
1381 hci_req_add(req
, HCI_OP_LE_READ_LOCAL_FEATURES
, 0, NULL
);
1383 /* Read LE Supported States */
1384 hci_req_add(req
, HCI_OP_LE_READ_SUPPORTED_STATES
, 0, NULL
);
1386 /* Read LE White List Size */
1387 hci_req_add(req
, HCI_OP_LE_READ_WHITE_LIST_SIZE
, 0, NULL
);
1389 /* Clear LE White List */
1390 hci_req_add(req
, HCI_OP_LE_CLEAR_WHITE_LIST
, 0, NULL
);
1392 /* LE-only controllers have LE implicitly enabled */
1393 if (!lmp_bredr_capable(hdev
))
1394 set_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
);
1397 static u8
hci_get_inquiry_mode(struct hci_dev
*hdev
)
1399 if (lmp_ext_inq_capable(hdev
))
1402 if (lmp_inq_rssi_capable(hdev
))
1405 if (hdev
->manufacturer
== 11 && hdev
->hci_rev
== 0x00 &&
1406 hdev
->lmp_subver
== 0x0757)
1409 if (hdev
->manufacturer
== 15) {
1410 if (hdev
->hci_rev
== 0x03 && hdev
->lmp_subver
== 0x6963)
1412 if (hdev
->hci_rev
== 0x09 && hdev
->lmp_subver
== 0x6963)
1414 if (hdev
->hci_rev
== 0x00 && hdev
->lmp_subver
== 0x6965)
1418 if (hdev
->manufacturer
== 31 && hdev
->hci_rev
== 0x2005 &&
1419 hdev
->lmp_subver
== 0x1805)
1425 static void hci_setup_inquiry_mode(struct hci_request
*req
)
1429 mode
= hci_get_inquiry_mode(req
->hdev
);
1431 hci_req_add(req
, HCI_OP_WRITE_INQUIRY_MODE
, 1, &mode
);
1434 static void hci_setup_event_mask(struct hci_request
*req
)
1436 struct hci_dev
*hdev
= req
->hdev
;
1438 /* The second byte is 0xff instead of 0x9f (two reserved bits
1439 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1440 * command otherwise.
1442 u8 events
[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1444 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1445 * any event mask for pre 1.2 devices.
1447 if (hdev
->hci_ver
< BLUETOOTH_VER_1_2
)
1450 if (lmp_bredr_capable(hdev
)) {
1451 events
[4] |= 0x01; /* Flow Specification Complete */
1452 events
[4] |= 0x02; /* Inquiry Result with RSSI */
1453 events
[4] |= 0x04; /* Read Remote Extended Features Complete */
1454 events
[5] |= 0x08; /* Synchronous Connection Complete */
1455 events
[5] |= 0x10; /* Synchronous Connection Changed */
1457 /* Use a different default for LE-only devices */
1458 memset(events
, 0, sizeof(events
));
1459 events
[0] |= 0x10; /* Disconnection Complete */
1460 events
[1] |= 0x08; /* Read Remote Version Information Complete */
1461 events
[1] |= 0x20; /* Command Complete */
1462 events
[1] |= 0x40; /* Command Status */
1463 events
[1] |= 0x80; /* Hardware Error */
1464 events
[2] |= 0x04; /* Number of Completed Packets */
1465 events
[3] |= 0x02; /* Data Buffer Overflow */
1467 if (hdev
->le_features
[0] & HCI_LE_ENCRYPTION
) {
1468 events
[0] |= 0x80; /* Encryption Change */
1469 events
[5] |= 0x80; /* Encryption Key Refresh Complete */
1473 if (lmp_inq_rssi_capable(hdev
))
1474 events
[4] |= 0x02; /* Inquiry Result with RSSI */
1476 if (lmp_sniffsubr_capable(hdev
))
1477 events
[5] |= 0x20; /* Sniff Subrating */
1479 if (lmp_pause_enc_capable(hdev
))
1480 events
[5] |= 0x80; /* Encryption Key Refresh Complete */
1482 if (lmp_ext_inq_capable(hdev
))
1483 events
[5] |= 0x40; /* Extended Inquiry Result */
1485 if (lmp_no_flush_capable(hdev
))
1486 events
[7] |= 0x01; /* Enhanced Flush Complete */
1488 if (lmp_lsto_capable(hdev
))
1489 events
[6] |= 0x80; /* Link Supervision Timeout Changed */
1491 if (lmp_ssp_capable(hdev
)) {
1492 events
[6] |= 0x01; /* IO Capability Request */
1493 events
[6] |= 0x02; /* IO Capability Response */
1494 events
[6] |= 0x04; /* User Confirmation Request */
1495 events
[6] |= 0x08; /* User Passkey Request */
1496 events
[6] |= 0x10; /* Remote OOB Data Request */
1497 events
[6] |= 0x20; /* Simple Pairing Complete */
1498 events
[7] |= 0x04; /* User Passkey Notification */
1499 events
[7] |= 0x08; /* Keypress Notification */
1500 events
[7] |= 0x10; /* Remote Host Supported
1501 * Features Notification
1505 if (lmp_le_capable(hdev
))
1506 events
[7] |= 0x20; /* LE Meta-Event */
1508 hci_req_add(req
, HCI_OP_SET_EVENT_MASK
, sizeof(events
), events
);
1511 static void hci_init2_req(struct hci_request
*req
, unsigned long opt
)
1513 struct hci_dev
*hdev
= req
->hdev
;
1515 if (lmp_bredr_capable(hdev
))
1518 clear_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
);
1520 if (lmp_le_capable(hdev
))
1523 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1524 * local supported commands HCI command.
1526 if (hdev
->manufacturer
!= 31 && hdev
->hci_ver
> BLUETOOTH_VER_1_1
)
1527 hci_req_add(req
, HCI_OP_READ_LOCAL_COMMANDS
, 0, NULL
);
1529 if (lmp_ssp_capable(hdev
)) {
1530 /* When SSP is available, then the host features page
1531 * should also be available as well. However some
1532 * controllers list the max_page as 0 as long as SSP
1533 * has not been enabled. To achieve proper debugging
1534 * output, force the minimum max_page to 1 at least.
1536 hdev
->max_page
= 0x01;
1538 if (test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
)) {
1540 hci_req_add(req
, HCI_OP_WRITE_SSP_MODE
,
1541 sizeof(mode
), &mode
);
1543 struct hci_cp_write_eir cp
;
1545 memset(hdev
->eir
, 0, sizeof(hdev
->eir
));
1546 memset(&cp
, 0, sizeof(cp
));
1548 hci_req_add(req
, HCI_OP_WRITE_EIR
, sizeof(cp
), &cp
);
1552 if (lmp_inq_rssi_capable(hdev
))
1553 hci_setup_inquiry_mode(req
);
1555 if (lmp_inq_tx_pwr_capable(hdev
))
1556 hci_req_add(req
, HCI_OP_READ_INQ_RSP_TX_POWER
, 0, NULL
);
1558 if (lmp_ext_feat_capable(hdev
)) {
1559 struct hci_cp_read_local_ext_features cp
;
1562 hci_req_add(req
, HCI_OP_READ_LOCAL_EXT_FEATURES
,
1566 if (test_bit(HCI_LINK_SECURITY
, &hdev
->dev_flags
)) {
1568 hci_req_add(req
, HCI_OP_WRITE_AUTH_ENABLE
, sizeof(enable
),
1573 static void hci_setup_link_policy(struct hci_request
*req
)
1575 struct hci_dev
*hdev
= req
->hdev
;
1576 struct hci_cp_write_def_link_policy cp
;
1577 u16 link_policy
= 0;
1579 if (lmp_rswitch_capable(hdev
))
1580 link_policy
|= HCI_LP_RSWITCH
;
1581 if (lmp_hold_capable(hdev
))
1582 link_policy
|= HCI_LP_HOLD
;
1583 if (lmp_sniff_capable(hdev
))
1584 link_policy
|= HCI_LP_SNIFF
;
1585 if (lmp_park_capable(hdev
))
1586 link_policy
|= HCI_LP_PARK
;
1588 cp
.policy
= cpu_to_le16(link_policy
);
1589 hci_req_add(req
, HCI_OP_WRITE_DEF_LINK_POLICY
, sizeof(cp
), &cp
);
1592 static void hci_set_le_support(struct hci_request
*req
)
1594 struct hci_dev
*hdev
= req
->hdev
;
1595 struct hci_cp_write_le_host_supported cp
;
1597 /* LE-only devices do not support explicit enablement */
1598 if (!lmp_bredr_capable(hdev
))
1601 memset(&cp
, 0, sizeof(cp
));
1603 if (test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
)) {
1608 if (cp
.le
!= lmp_host_le_capable(hdev
))
1609 hci_req_add(req
, HCI_OP_WRITE_LE_HOST_SUPPORTED
, sizeof(cp
),
1613 static void hci_set_event_mask_page_2(struct hci_request
*req
)
1615 struct hci_dev
*hdev
= req
->hdev
;
1616 u8 events
[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1618 /* If Connectionless Slave Broadcast master role is supported
1619 * enable all necessary events for it.
1621 if (lmp_csb_master_capable(hdev
)) {
1622 events
[1] |= 0x40; /* Triggered Clock Capture */
1623 events
[1] |= 0x80; /* Synchronization Train Complete */
1624 events
[2] |= 0x10; /* Slave Page Response Timeout */
1625 events
[2] |= 0x20; /* CSB Channel Map Change */
1628 /* If Connectionless Slave Broadcast slave role is supported
1629 * enable all necessary events for it.
1631 if (lmp_csb_slave_capable(hdev
)) {
1632 events
[2] |= 0x01; /* Synchronization Train Received */
1633 events
[2] |= 0x02; /* CSB Receive */
1634 events
[2] |= 0x04; /* CSB Timeout */
1635 events
[2] |= 0x08; /* Truncated Page Complete */
1638 /* Enable Authenticated Payload Timeout Expired event if supported */
1639 if (lmp_ping_capable(hdev
) || hdev
->le_features
[0] & HCI_LE_PING
)
1642 hci_req_add(req
, HCI_OP_SET_EVENT_MASK_PAGE_2
, sizeof(events
), events
);
1645 static void hci_init3_req(struct hci_request
*req
, unsigned long opt
)
1647 struct hci_dev
*hdev
= req
->hdev
;
1650 hci_setup_event_mask(req
);
1652 /* Some Broadcom based Bluetooth controllers do not support the
1653 * Delete Stored Link Key command. They are clearly indicating its
1654 * absence in the bit mask of supported commands.
1656 * Check the supported commands and only if the the command is marked
1657 * as supported send it. If not supported assume that the controller
1658 * does not have actual support for stored link keys which makes this
1659 * command redundant anyway.
1661 * Some controllers indicate that they support handling deleting
1662 * stored link keys, but they don't. The quirk lets a driver
1663 * just disable this command.
1665 if (hdev
->commands
[6] & 0x80 &&
1666 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY
, &hdev
->quirks
)) {
1667 struct hci_cp_delete_stored_link_key cp
;
1669 bacpy(&cp
.bdaddr
, BDADDR_ANY
);
1670 cp
.delete_all
= 0x01;
1671 hci_req_add(req
, HCI_OP_DELETE_STORED_LINK_KEY
,
1675 if (hdev
->commands
[5] & 0x10)
1676 hci_setup_link_policy(req
);
1678 if (lmp_le_capable(hdev
)) {
1681 memset(events
, 0, sizeof(events
));
1684 if (hdev
->le_features
[0] & HCI_LE_ENCRYPTION
)
1685 events
[0] |= 0x10; /* LE Long Term Key Request */
1687 /* If controller supports the Connection Parameters Request
1688 * Link Layer Procedure, enable the corresponding event.
1690 if (hdev
->le_features
[0] & HCI_LE_CONN_PARAM_REQ_PROC
)
1691 events
[0] |= 0x20; /* LE Remote Connection
1695 hci_req_add(req
, HCI_OP_LE_SET_EVENT_MASK
, sizeof(events
),
1698 if (hdev
->commands
[25] & 0x40) {
1699 /* Read LE Advertising Channel TX Power */
1700 hci_req_add(req
, HCI_OP_LE_READ_ADV_TX_POWER
, 0, NULL
);
1703 hci_set_le_support(req
);
1706 /* Read features beyond page 1 if available */
1707 for (p
= 2; p
< HCI_MAX_PAGES
&& p
<= hdev
->max_page
; p
++) {
1708 struct hci_cp_read_local_ext_features cp
;
1711 hci_req_add(req
, HCI_OP_READ_LOCAL_EXT_FEATURES
,
1716 static void hci_init4_req(struct hci_request
*req
, unsigned long opt
)
1718 struct hci_dev
*hdev
= req
->hdev
;
1720 /* Set event mask page 2 if the HCI command for it is supported */
1721 if (hdev
->commands
[22] & 0x04)
1722 hci_set_event_mask_page_2(req
);
1724 /* Read local codec list if the HCI command is supported */
1725 if (hdev
->commands
[29] & 0x20)
1726 hci_req_add(req
, HCI_OP_READ_LOCAL_CODECS
, 0, NULL
);
1728 /* Get MWS transport configuration if the HCI command is supported */
1729 if (hdev
->commands
[30] & 0x08)
1730 hci_req_add(req
, HCI_OP_GET_MWS_TRANSPORT_CONFIG
, 0, NULL
);
1732 /* Check for Synchronization Train support */
1733 if (lmp_sync_train_capable(hdev
))
1734 hci_req_add(req
, HCI_OP_READ_SYNC_TRAIN_PARAMS
, 0, NULL
);
1736 /* Enable Secure Connections if supported and configured */
1737 if ((lmp_sc_capable(hdev
) ||
1738 test_bit(HCI_FORCE_SC
, &hdev
->dbg_flags
)) &&
1739 test_bit(HCI_SC_ENABLED
, &hdev
->dev_flags
)) {
1741 hci_req_add(req
, HCI_OP_WRITE_SC_SUPPORT
,
1742 sizeof(support
), &support
);
1746 static int __hci_init(struct hci_dev
*hdev
)
1750 err
= __hci_req_sync(hdev
, hci_init1_req
, 0, HCI_INIT_TIMEOUT
);
1754 /* The Device Under Test (DUT) mode is special and available for
1755 * all controller types. So just create it early on.
1757 if (test_bit(HCI_SETUP
, &hdev
->dev_flags
)) {
1758 debugfs_create_file("dut_mode", 0644, hdev
->debugfs
, hdev
,
1762 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1763 * BR/EDR/LE type controllers. AMP controllers only need the
1766 if (hdev
->dev_type
!= HCI_BREDR
)
1769 err
= __hci_req_sync(hdev
, hci_init2_req
, 0, HCI_INIT_TIMEOUT
);
1773 err
= __hci_req_sync(hdev
, hci_init3_req
, 0, HCI_INIT_TIMEOUT
);
1777 err
= __hci_req_sync(hdev
, hci_init4_req
, 0, HCI_INIT_TIMEOUT
);
1781 /* Only create debugfs entries during the initial setup
1782 * phase and not every time the controller gets powered on.
1784 if (!test_bit(HCI_SETUP
, &hdev
->dev_flags
))
1787 debugfs_create_file("features", 0444, hdev
->debugfs
, hdev
,
1789 debugfs_create_u16("manufacturer", 0444, hdev
->debugfs
,
1790 &hdev
->manufacturer
);
1791 debugfs_create_u8("hci_version", 0444, hdev
->debugfs
, &hdev
->hci_ver
);
1792 debugfs_create_u16("hci_revision", 0444, hdev
->debugfs
, &hdev
->hci_rev
);
1793 debugfs_create_file("device_list", 0444, hdev
->debugfs
, hdev
,
1795 debugfs_create_file("blacklist", 0444, hdev
->debugfs
, hdev
,
1797 debugfs_create_file("uuids", 0444, hdev
->debugfs
, hdev
, &uuids_fops
);
1799 debugfs_create_file("conn_info_min_age", 0644, hdev
->debugfs
, hdev
,
1800 &conn_info_min_age_fops
);
1801 debugfs_create_file("conn_info_max_age", 0644, hdev
->debugfs
, hdev
,
1802 &conn_info_max_age_fops
);
1804 if (lmp_bredr_capable(hdev
)) {
1805 debugfs_create_file("inquiry_cache", 0444, hdev
->debugfs
,
1806 hdev
, &inquiry_cache_fops
);
1807 debugfs_create_file("link_keys", 0400, hdev
->debugfs
,
1808 hdev
, &link_keys_fops
);
1809 debugfs_create_file("dev_class", 0444, hdev
->debugfs
,
1810 hdev
, &dev_class_fops
);
1811 debugfs_create_file("voice_setting", 0444, hdev
->debugfs
,
1812 hdev
, &voice_setting_fops
);
1815 if (lmp_ssp_capable(hdev
)) {
1816 debugfs_create_file("auto_accept_delay", 0644, hdev
->debugfs
,
1817 hdev
, &auto_accept_delay_fops
);
1818 debugfs_create_file("force_sc_support", 0644, hdev
->debugfs
,
1819 hdev
, &force_sc_support_fops
);
1820 debugfs_create_file("sc_only_mode", 0444, hdev
->debugfs
,
1821 hdev
, &sc_only_mode_fops
);
1824 if (lmp_sniff_capable(hdev
)) {
1825 debugfs_create_file("idle_timeout", 0644, hdev
->debugfs
,
1826 hdev
, &idle_timeout_fops
);
1827 debugfs_create_file("sniff_min_interval", 0644, hdev
->debugfs
,
1828 hdev
, &sniff_min_interval_fops
);
1829 debugfs_create_file("sniff_max_interval", 0644, hdev
->debugfs
,
1830 hdev
, &sniff_max_interval_fops
);
1833 if (lmp_le_capable(hdev
)) {
1834 debugfs_create_file("identity", 0400, hdev
->debugfs
,
1835 hdev
, &identity_fops
);
1836 debugfs_create_file("rpa_timeout", 0644, hdev
->debugfs
,
1837 hdev
, &rpa_timeout_fops
);
1838 debugfs_create_file("random_address", 0444, hdev
->debugfs
,
1839 hdev
, &random_address_fops
);
1840 debugfs_create_file("static_address", 0444, hdev
->debugfs
,
1841 hdev
, &static_address_fops
);
1843 /* For controllers with a public address, provide a debug
1844 * option to force the usage of the configured static
1845 * address. By default the public address is used.
1847 if (bacmp(&hdev
->bdaddr
, BDADDR_ANY
))
1848 debugfs_create_file("force_static_address", 0644,
1849 hdev
->debugfs
, hdev
,
1850 &force_static_address_fops
);
1852 debugfs_create_u8("white_list_size", 0444, hdev
->debugfs
,
1853 &hdev
->le_white_list_size
);
1854 debugfs_create_file("white_list", 0444, hdev
->debugfs
, hdev
,
1856 debugfs_create_file("identity_resolving_keys", 0400,
1857 hdev
->debugfs
, hdev
,
1858 &identity_resolving_keys_fops
);
1859 debugfs_create_file("long_term_keys", 0400, hdev
->debugfs
,
1860 hdev
, &long_term_keys_fops
);
1861 debugfs_create_file("conn_min_interval", 0644, hdev
->debugfs
,
1862 hdev
, &conn_min_interval_fops
);
1863 debugfs_create_file("conn_max_interval", 0644, hdev
->debugfs
,
1864 hdev
, &conn_max_interval_fops
);
1865 debugfs_create_file("conn_latency", 0644, hdev
->debugfs
,
1866 hdev
, &conn_latency_fops
);
1867 debugfs_create_file("supervision_timeout", 0644, hdev
->debugfs
,
1868 hdev
, &supervision_timeout_fops
);
1869 debugfs_create_file("adv_channel_map", 0644, hdev
->debugfs
,
1870 hdev
, &adv_channel_map_fops
);
1871 debugfs_create_file("adv_min_interval", 0644, hdev
->debugfs
,
1872 hdev
, &adv_min_interval_fops
);
1873 debugfs_create_file("adv_max_interval", 0644, hdev
->debugfs
,
1874 hdev
, &adv_max_interval_fops
);
1875 debugfs_create_u16("discov_interleaved_timeout", 0644,
1877 &hdev
->discov_interleaved_timeout
);
1885 static void hci_init0_req(struct hci_request
*req
, unsigned long opt
)
1887 struct hci_dev
*hdev
= req
->hdev
;
1889 BT_DBG("%s %ld", hdev
->name
, opt
);
1892 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE
, &hdev
->quirks
))
1893 hci_reset_req(req
, 0);
1895 /* Read Local Version */
1896 hci_req_add(req
, HCI_OP_READ_LOCAL_VERSION
, 0, NULL
);
1898 /* Read BD Address */
1899 if (hdev
->set_bdaddr
)
1900 hci_req_add(req
, HCI_OP_READ_BD_ADDR
, 0, NULL
);
1903 static int __hci_unconf_init(struct hci_dev
*hdev
)
1907 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &hdev
->quirks
))
1910 err
= __hci_req_sync(hdev
, hci_init0_req
, 0, HCI_INIT_TIMEOUT
);
1917 static void hci_scan_req(struct hci_request
*req
, unsigned long opt
)
1921 BT_DBG("%s %x", req
->hdev
->name
, scan
);
1923 /* Inquiry and Page scans */
1924 hci_req_add(req
, HCI_OP_WRITE_SCAN_ENABLE
, 1, &scan
);
1927 static void hci_auth_req(struct hci_request
*req
, unsigned long opt
)
1931 BT_DBG("%s %x", req
->hdev
->name
, auth
);
1933 /* Authentication */
1934 hci_req_add(req
, HCI_OP_WRITE_AUTH_ENABLE
, 1, &auth
);
1937 static void hci_encrypt_req(struct hci_request
*req
, unsigned long opt
)
1941 BT_DBG("%s %x", req
->hdev
->name
, encrypt
);
1944 hci_req_add(req
, HCI_OP_WRITE_ENCRYPT_MODE
, 1, &encrypt
);
1947 static void hci_linkpol_req(struct hci_request
*req
, unsigned long opt
)
1949 __le16 policy
= cpu_to_le16(opt
);
1951 BT_DBG("%s %x", req
->hdev
->name
, policy
);
1953 /* Default link policy */
1954 hci_req_add(req
, HCI_OP_WRITE_DEF_LINK_POLICY
, 2, &policy
);
1957 /* Get HCI device by index.
1958 * Device is held on return. */
1959 struct hci_dev
*hci_dev_get(int index
)
1961 struct hci_dev
*hdev
= NULL
, *d
;
1963 BT_DBG("%d", index
);
1968 read_lock(&hci_dev_list_lock
);
1969 list_for_each_entry(d
, &hci_dev_list
, list
) {
1970 if (d
->id
== index
) {
1971 hdev
= hci_dev_hold(d
);
1975 read_unlock(&hci_dev_list_lock
);
1979 /* ---- Inquiry support ---- */
1981 bool hci_discovery_active(struct hci_dev
*hdev
)
1983 struct discovery_state
*discov
= &hdev
->discovery
;
1985 switch (discov
->state
) {
1986 case DISCOVERY_FINDING
:
1987 case DISCOVERY_RESOLVING
:
1995 void hci_discovery_set_state(struct hci_dev
*hdev
, int state
)
1997 int old_state
= hdev
->discovery
.state
;
1999 BT_DBG("%s state %u -> %u", hdev
->name
, hdev
->discovery
.state
, state
);
2001 if (old_state
== state
)
2004 hdev
->discovery
.state
= state
;
2007 case DISCOVERY_STOPPED
:
2008 hci_update_background_scan(hdev
);
2010 if (old_state
!= DISCOVERY_STARTING
)
2011 mgmt_discovering(hdev
, 0);
2013 case DISCOVERY_STARTING
:
2015 case DISCOVERY_FINDING
:
2016 mgmt_discovering(hdev
, 1);
2018 case DISCOVERY_RESOLVING
:
2020 case DISCOVERY_STOPPING
:
2025 void hci_inquiry_cache_flush(struct hci_dev
*hdev
)
2027 struct discovery_state
*cache
= &hdev
->discovery
;
2028 struct inquiry_entry
*p
, *n
;
2030 list_for_each_entry_safe(p
, n
, &cache
->all
, all
) {
2035 INIT_LIST_HEAD(&cache
->unknown
);
2036 INIT_LIST_HEAD(&cache
->resolve
);
2039 struct inquiry_entry
*hci_inquiry_cache_lookup(struct hci_dev
*hdev
,
2042 struct discovery_state
*cache
= &hdev
->discovery
;
2043 struct inquiry_entry
*e
;
2045 BT_DBG("cache %p, %pMR", cache
, bdaddr
);
2047 list_for_each_entry(e
, &cache
->all
, all
) {
2048 if (!bacmp(&e
->data
.bdaddr
, bdaddr
))
2055 struct inquiry_entry
*hci_inquiry_cache_lookup_unknown(struct hci_dev
*hdev
,
2058 struct discovery_state
*cache
= &hdev
->discovery
;
2059 struct inquiry_entry
*e
;
2061 BT_DBG("cache %p, %pMR", cache
, bdaddr
);
2063 list_for_each_entry(e
, &cache
->unknown
, list
) {
2064 if (!bacmp(&e
->data
.bdaddr
, bdaddr
))
2071 struct inquiry_entry
*hci_inquiry_cache_lookup_resolve(struct hci_dev
*hdev
,
2075 struct discovery_state
*cache
= &hdev
->discovery
;
2076 struct inquiry_entry
*e
;
2078 BT_DBG("cache %p bdaddr %pMR state %d", cache
, bdaddr
, state
);
2080 list_for_each_entry(e
, &cache
->resolve
, list
) {
2081 if (!bacmp(bdaddr
, BDADDR_ANY
) && e
->name_state
== state
)
2083 if (!bacmp(&e
->data
.bdaddr
, bdaddr
))
2090 void hci_inquiry_cache_update_resolve(struct hci_dev
*hdev
,
2091 struct inquiry_entry
*ie
)
2093 struct discovery_state
*cache
= &hdev
->discovery
;
2094 struct list_head
*pos
= &cache
->resolve
;
2095 struct inquiry_entry
*p
;
2097 list_del(&ie
->list
);
2099 list_for_each_entry(p
, &cache
->resolve
, list
) {
2100 if (p
->name_state
!= NAME_PENDING
&&
2101 abs(p
->data
.rssi
) >= abs(ie
->data
.rssi
))
2106 list_add(&ie
->list
, pos
);
2109 u32
hci_inquiry_cache_update(struct hci_dev
*hdev
, struct inquiry_data
*data
,
2112 struct discovery_state
*cache
= &hdev
->discovery
;
2113 struct inquiry_entry
*ie
;
2116 BT_DBG("cache %p, %pMR", cache
, &data
->bdaddr
);
2118 hci_remove_remote_oob_data(hdev
, &data
->bdaddr
);
2120 if (!data
->ssp_mode
)
2121 flags
|= MGMT_DEV_FOUND_LEGACY_PAIRING
;
2123 ie
= hci_inquiry_cache_lookup(hdev
, &data
->bdaddr
);
2125 if (!ie
->data
.ssp_mode
)
2126 flags
|= MGMT_DEV_FOUND_LEGACY_PAIRING
;
2128 if (ie
->name_state
== NAME_NEEDED
&&
2129 data
->rssi
!= ie
->data
.rssi
) {
2130 ie
->data
.rssi
= data
->rssi
;
2131 hci_inquiry_cache_update_resolve(hdev
, ie
);
2137 /* Entry not in the cache. Add new one. */
2138 ie
= kzalloc(sizeof(*ie
), GFP_KERNEL
);
2140 flags
|= MGMT_DEV_FOUND_CONFIRM_NAME
;
2144 list_add(&ie
->all
, &cache
->all
);
2147 ie
->name_state
= NAME_KNOWN
;
2149 ie
->name_state
= NAME_NOT_KNOWN
;
2150 list_add(&ie
->list
, &cache
->unknown
);
2154 if (name_known
&& ie
->name_state
!= NAME_KNOWN
&&
2155 ie
->name_state
!= NAME_PENDING
) {
2156 ie
->name_state
= NAME_KNOWN
;
2157 list_del(&ie
->list
);
2160 memcpy(&ie
->data
, data
, sizeof(*data
));
2161 ie
->timestamp
= jiffies
;
2162 cache
->timestamp
= jiffies
;
2164 if (ie
->name_state
== NAME_NOT_KNOWN
)
2165 flags
|= MGMT_DEV_FOUND_CONFIRM_NAME
;
2171 static int inquiry_cache_dump(struct hci_dev
*hdev
, int num
, __u8
*buf
)
2173 struct discovery_state
*cache
= &hdev
->discovery
;
2174 struct inquiry_info
*info
= (struct inquiry_info
*) buf
;
2175 struct inquiry_entry
*e
;
2178 list_for_each_entry(e
, &cache
->all
, all
) {
2179 struct inquiry_data
*data
= &e
->data
;
2184 bacpy(&info
->bdaddr
, &data
->bdaddr
);
2185 info
->pscan_rep_mode
= data
->pscan_rep_mode
;
2186 info
->pscan_period_mode
= data
->pscan_period_mode
;
2187 info
->pscan_mode
= data
->pscan_mode
;
2188 memcpy(info
->dev_class
, data
->dev_class
, 3);
2189 info
->clock_offset
= data
->clock_offset
;
2195 BT_DBG("cache %p, copied %d", cache
, copied
);
2199 static void hci_inq_req(struct hci_request
*req
, unsigned long opt
)
2201 struct hci_inquiry_req
*ir
= (struct hci_inquiry_req
*) opt
;
2202 struct hci_dev
*hdev
= req
->hdev
;
2203 struct hci_cp_inquiry cp
;
2205 BT_DBG("%s", hdev
->name
);
2207 if (test_bit(HCI_INQUIRY
, &hdev
->flags
))
2211 memcpy(&cp
.lap
, &ir
->lap
, 3);
2212 cp
.length
= ir
->length
;
2213 cp
.num_rsp
= ir
->num_rsp
;
2214 hci_req_add(req
, HCI_OP_INQUIRY
, sizeof(cp
), &cp
);
2217 int hci_inquiry(void __user
*arg
)
2219 __u8 __user
*ptr
= arg
;
2220 struct hci_inquiry_req ir
;
2221 struct hci_dev
*hdev
;
2222 int err
= 0, do_inquiry
= 0, max_rsp
;
2226 if (copy_from_user(&ir
, ptr
, sizeof(ir
)))
2229 hdev
= hci_dev_get(ir
.dev_id
);
2233 if (test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
)) {
2238 if (test_bit(HCI_UNCONFIGURED
, &hdev
->dev_flags
)) {
2243 if (hdev
->dev_type
!= HCI_BREDR
) {
2248 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
)) {
2254 if (inquiry_cache_age(hdev
) > INQUIRY_CACHE_AGE_MAX
||
2255 inquiry_cache_empty(hdev
) || ir
.flags
& IREQ_CACHE_FLUSH
) {
2256 hci_inquiry_cache_flush(hdev
);
2259 hci_dev_unlock(hdev
);
2261 timeo
= ir
.length
* msecs_to_jiffies(2000);
2264 err
= hci_req_sync(hdev
, hci_inq_req
, (unsigned long) &ir
,
2269 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2270 * cleared). If it is interrupted by a signal, return -EINTR.
2272 if (wait_on_bit(&hdev
->flags
, HCI_INQUIRY
,
2273 TASK_INTERRUPTIBLE
))
2277 /* for unlimited number of responses we will use buffer with
2280 max_rsp
= (ir
.num_rsp
== 0) ? 255 : ir
.num_rsp
;
2282 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2283 * copy it to the user space.
2285 buf
= kmalloc(sizeof(struct inquiry_info
) * max_rsp
, GFP_KERNEL
);
2292 ir
.num_rsp
= inquiry_cache_dump(hdev
, max_rsp
, buf
);
2293 hci_dev_unlock(hdev
);
2295 BT_DBG("num_rsp %d", ir
.num_rsp
);
2297 if (!copy_to_user(ptr
, &ir
, sizeof(ir
))) {
2299 if (copy_to_user(ptr
, buf
, sizeof(struct inquiry_info
) *
2312 static int hci_dev_do_open(struct hci_dev
*hdev
)
2316 BT_DBG("%s %p", hdev
->name
, hdev
);
2320 if (test_bit(HCI_UNREGISTER
, &hdev
->dev_flags
)) {
2325 if (!test_bit(HCI_SETUP
, &hdev
->dev_flags
) &&
2326 !test_bit(HCI_CONFIG
, &hdev
->dev_flags
)) {
2327 /* Check for rfkill but allow the HCI setup stage to
2328 * proceed (which in itself doesn't cause any RF activity).
2330 if (test_bit(HCI_RFKILLED
, &hdev
->dev_flags
)) {
2335 /* Check for valid public address or a configured static
2336 * random adddress, but let the HCI setup proceed to
2337 * be able to determine if there is a public address
2340 * In case of user channel usage, it is not important
2341 * if a public address or static random address is
2344 * This check is only valid for BR/EDR controllers
2345 * since AMP controllers do not have an address.
2347 if (!test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
) &&
2348 hdev
->dev_type
== HCI_BREDR
&&
2349 !bacmp(&hdev
->bdaddr
, BDADDR_ANY
) &&
2350 !bacmp(&hdev
->static_addr
, BDADDR_ANY
)) {
2351 ret
= -EADDRNOTAVAIL
;
2356 if (test_bit(HCI_UP
, &hdev
->flags
)) {
2361 if (hdev
->open(hdev
)) {
2366 atomic_set(&hdev
->cmd_cnt
, 1);
2367 set_bit(HCI_INIT
, &hdev
->flags
);
2369 if (test_bit(HCI_SETUP
, &hdev
->dev_flags
)) {
2371 ret
= hdev
->setup(hdev
);
2373 /* The transport driver can set these quirks before
2374 * creating the HCI device or in its setup callback.
2376 * In case any of them is set, the controller has to
2377 * start up as unconfigured.
2379 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
) ||
2380 test_bit(HCI_QUIRK_INVALID_BDADDR
, &hdev
->quirks
))
2381 set_bit(HCI_UNCONFIGURED
, &hdev
->dev_flags
);
2383 /* For an unconfigured controller it is required to
2384 * read at least the version information provided by
2385 * the Read Local Version Information command.
2387 * If the set_bdaddr driver callback is provided, then
2388 * also the original Bluetooth public device address
2389 * will be read using the Read BD Address command.
2391 if (test_bit(HCI_UNCONFIGURED
, &hdev
->dev_flags
))
2392 ret
= __hci_unconf_init(hdev
);
2395 if (test_bit(HCI_CONFIG
, &hdev
->dev_flags
)) {
2396 /* If public address change is configured, ensure that
2397 * the address gets programmed. If the driver does not
2398 * support changing the public address, fail the power
2401 if (bacmp(&hdev
->public_addr
, BDADDR_ANY
) &&
2403 ret
= hdev
->set_bdaddr(hdev
, &hdev
->public_addr
);
2405 ret
= -EADDRNOTAVAIL
;
2409 if (!test_bit(HCI_UNCONFIGURED
, &hdev
->dev_flags
) &&
2410 !test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
))
2411 ret
= __hci_init(hdev
);
2414 clear_bit(HCI_INIT
, &hdev
->flags
);
2418 set_bit(HCI_RPA_EXPIRED
, &hdev
->dev_flags
);
2419 set_bit(HCI_UP
, &hdev
->flags
);
2420 hci_notify(hdev
, HCI_DEV_UP
);
2421 if (!test_bit(HCI_SETUP
, &hdev
->dev_flags
) &&
2422 !test_bit(HCI_CONFIG
, &hdev
->dev_flags
) &&
2423 !test_bit(HCI_UNCONFIGURED
, &hdev
->dev_flags
) &&
2424 !test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
) &&
2425 hdev
->dev_type
== HCI_BREDR
) {
2427 mgmt_powered(hdev
, 1);
2428 hci_dev_unlock(hdev
);
2431 /* Init failed, cleanup */
2432 flush_work(&hdev
->tx_work
);
2433 flush_work(&hdev
->cmd_work
);
2434 flush_work(&hdev
->rx_work
);
2436 skb_queue_purge(&hdev
->cmd_q
);
2437 skb_queue_purge(&hdev
->rx_q
);
2442 if (hdev
->sent_cmd
) {
2443 kfree_skb(hdev
->sent_cmd
);
2444 hdev
->sent_cmd
= NULL
;
2448 hdev
->flags
&= BIT(HCI_RAW
);
2452 hci_req_unlock(hdev
);
2456 /* ---- HCI ioctl helpers ---- */
2458 int hci_dev_open(__u16 dev
)
2460 struct hci_dev
*hdev
;
2463 hdev
= hci_dev_get(dev
);
2467 /* Devices that are marked as unconfigured can only be powered
2468 * up as user channel. Trying to bring them up as normal devices
2469 * will result into a failure. Only user channel operation is
2472 * When this function is called for a user channel, the flag
2473 * HCI_USER_CHANNEL will be set first before attempting to
2476 if (test_bit(HCI_UNCONFIGURED
, &hdev
->dev_flags
) &&
2477 !test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
)) {
2482 /* We need to ensure that no other power on/off work is pending
2483 * before proceeding to call hci_dev_do_open. This is
2484 * particularly important if the setup procedure has not yet
2487 if (test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
))
2488 cancel_delayed_work(&hdev
->power_off
);
2490 /* After this call it is guaranteed that the setup procedure
2491 * has finished. This means that error conditions like RFKILL
2492 * or no valid public or static random address apply.
2494 flush_workqueue(hdev
->req_workqueue
);
2496 /* For controllers not using the management interface and that
2497 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
2498 * so that pairing works for them. Once the management interface
2499 * is in use this bit will be cleared again and userspace has
2500 * to explicitly enable it.
2502 if (!test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
) &&
2503 !test_bit(HCI_MGMT
, &hdev
->dev_flags
))
2504 set_bit(HCI_BONDABLE
, &hdev
->dev_flags
);
2506 err
= hci_dev_do_open(hdev
);
2513 /* This function requires the caller holds hdev->lock */
2514 static void hci_pend_le_actions_clear(struct hci_dev
*hdev
)
2516 struct hci_conn_params
*p
;
2518 list_for_each_entry(p
, &hdev
->le_conn_params
, list
) {
2520 hci_conn_drop(p
->conn
);
2521 hci_conn_put(p
->conn
);
2524 list_del_init(&p
->action
);
2527 BT_DBG("All LE pending actions cleared");
2530 static int hci_dev_do_close(struct hci_dev
*hdev
)
2532 BT_DBG("%s %p", hdev
->name
, hdev
);
2534 cancel_delayed_work(&hdev
->power_off
);
2536 hci_req_cancel(hdev
, ENODEV
);
2539 if (!test_and_clear_bit(HCI_UP
, &hdev
->flags
)) {
2540 cancel_delayed_work_sync(&hdev
->cmd_timer
);
2541 hci_req_unlock(hdev
);
2545 /* Flush RX and TX works */
2546 flush_work(&hdev
->tx_work
);
2547 flush_work(&hdev
->rx_work
);
2549 if (hdev
->discov_timeout
> 0) {
2550 cancel_delayed_work(&hdev
->discov_off
);
2551 hdev
->discov_timeout
= 0;
2552 clear_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
2553 clear_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
2556 if (test_and_clear_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
))
2557 cancel_delayed_work(&hdev
->service_cache
);
2559 cancel_delayed_work_sync(&hdev
->le_scan_disable
);
2561 if (test_bit(HCI_MGMT
, &hdev
->dev_flags
))
2562 cancel_delayed_work_sync(&hdev
->rpa_expired
);
2565 hci_inquiry_cache_flush(hdev
);
2566 hci_pend_le_actions_clear(hdev
);
2567 hci_conn_hash_flush(hdev
);
2568 hci_dev_unlock(hdev
);
2570 hci_notify(hdev
, HCI_DEV_DOWN
);
2576 skb_queue_purge(&hdev
->cmd_q
);
2577 atomic_set(&hdev
->cmd_cnt
, 1);
2578 if (!test_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
) &&
2579 !test_bit(HCI_UNCONFIGURED
, &hdev
->dev_flags
) &&
2580 test_bit(HCI_QUIRK_RESET_ON_CLOSE
, &hdev
->quirks
)) {
2581 set_bit(HCI_INIT
, &hdev
->flags
);
2582 __hci_req_sync(hdev
, hci_reset_req
, 0, HCI_CMD_TIMEOUT
);
2583 clear_bit(HCI_INIT
, &hdev
->flags
);
2586 /* flush cmd work */
2587 flush_work(&hdev
->cmd_work
);
2590 skb_queue_purge(&hdev
->rx_q
);
2591 skb_queue_purge(&hdev
->cmd_q
);
2592 skb_queue_purge(&hdev
->raw_q
);
2594 /* Drop last sent command */
2595 if (hdev
->sent_cmd
) {
2596 cancel_delayed_work_sync(&hdev
->cmd_timer
);
2597 kfree_skb(hdev
->sent_cmd
);
2598 hdev
->sent_cmd
= NULL
;
2601 kfree_skb(hdev
->recv_evt
);
2602 hdev
->recv_evt
= NULL
;
2604 /* After this point our queues are empty
2605 * and no tasks are scheduled. */
2609 hdev
->flags
&= BIT(HCI_RAW
);
2610 hdev
->dev_flags
&= ~HCI_PERSISTENT_MASK
;
2612 if (!test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
)) {
2613 if (hdev
->dev_type
== HCI_BREDR
) {
2615 mgmt_powered(hdev
, 0);
2616 hci_dev_unlock(hdev
);
2620 /* Controller radio is available but is currently powered down */
2621 hdev
->amp_status
= AMP_STATUS_POWERED_DOWN
;
2623 memset(hdev
->eir
, 0, sizeof(hdev
->eir
));
2624 memset(hdev
->dev_class
, 0, sizeof(hdev
->dev_class
));
2625 bacpy(&hdev
->random_addr
, BDADDR_ANY
);
2627 hci_req_unlock(hdev
);
2633 int hci_dev_close(__u16 dev
)
2635 struct hci_dev
*hdev
;
2638 hdev
= hci_dev_get(dev
);
2642 if (test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
)) {
2647 if (test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
))
2648 cancel_delayed_work(&hdev
->power_off
);
2650 err
= hci_dev_do_close(hdev
);
2657 int hci_dev_reset(__u16 dev
)
2659 struct hci_dev
*hdev
;
2662 hdev
= hci_dev_get(dev
);
2668 if (!test_bit(HCI_UP
, &hdev
->flags
)) {
2673 if (test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
)) {
2678 if (test_bit(HCI_UNCONFIGURED
, &hdev
->dev_flags
)) {
2684 skb_queue_purge(&hdev
->rx_q
);
2685 skb_queue_purge(&hdev
->cmd_q
);
2688 hci_inquiry_cache_flush(hdev
);
2689 hci_conn_hash_flush(hdev
);
2690 hci_dev_unlock(hdev
);
2695 atomic_set(&hdev
->cmd_cnt
, 1);
2696 hdev
->acl_cnt
= 0; hdev
->sco_cnt
= 0; hdev
->le_cnt
= 0;
2698 ret
= __hci_req_sync(hdev
, hci_reset_req
, 0, HCI_INIT_TIMEOUT
);
2701 hci_req_unlock(hdev
);
2706 int hci_dev_reset_stat(__u16 dev
)
2708 struct hci_dev
*hdev
;
2711 hdev
= hci_dev_get(dev
);
2715 if (test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
)) {
2720 if (test_bit(HCI_UNCONFIGURED
, &hdev
->dev_flags
)) {
2725 memset(&hdev
->stat
, 0, sizeof(struct hci_dev_stats
));
2732 static void hci_update_scan_state(struct hci_dev
*hdev
, u8 scan
)
2734 bool conn_changed
, discov_changed
;
2736 BT_DBG("%s scan 0x%02x", hdev
->name
, scan
);
2738 if ((scan
& SCAN_PAGE
))
2739 conn_changed
= !test_and_set_bit(HCI_CONNECTABLE
,
2742 conn_changed
= test_and_clear_bit(HCI_CONNECTABLE
,
2745 if ((scan
& SCAN_INQUIRY
)) {
2746 discov_changed
= !test_and_set_bit(HCI_DISCOVERABLE
,
2749 clear_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
2750 discov_changed
= test_and_clear_bit(HCI_DISCOVERABLE
,
2754 if (!test_bit(HCI_MGMT
, &hdev
->dev_flags
))
2757 if (conn_changed
|| discov_changed
) {
2758 /* In case this was disabled through mgmt */
2759 set_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
);
2761 if (test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
))
2762 mgmt_update_adv_data(hdev
);
2764 mgmt_new_settings(hdev
);
2768 int hci_dev_cmd(unsigned int cmd
, void __user
*arg
)
2770 struct hci_dev
*hdev
;
2771 struct hci_dev_req dr
;
2774 if (copy_from_user(&dr
, arg
, sizeof(dr
)))
2777 hdev
= hci_dev_get(dr
.dev_id
);
2781 if (test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
)) {
2786 if (test_bit(HCI_UNCONFIGURED
, &hdev
->dev_flags
)) {
2791 if (hdev
->dev_type
!= HCI_BREDR
) {
2796 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
)) {
2803 err
= hci_req_sync(hdev
, hci_auth_req
, dr
.dev_opt
,
2808 if (!lmp_encrypt_capable(hdev
)) {
2813 if (!test_bit(HCI_AUTH
, &hdev
->flags
)) {
2814 /* Auth must be enabled first */
2815 err
= hci_req_sync(hdev
, hci_auth_req
, dr
.dev_opt
,
2821 err
= hci_req_sync(hdev
, hci_encrypt_req
, dr
.dev_opt
,
2826 err
= hci_req_sync(hdev
, hci_scan_req
, dr
.dev_opt
,
2829 /* Ensure that the connectable and discoverable states
2830 * get correctly modified as this was a non-mgmt change.
2833 hci_update_scan_state(hdev
, dr
.dev_opt
);
2837 err
= hci_req_sync(hdev
, hci_linkpol_req
, dr
.dev_opt
,
2841 case HCISETLINKMODE
:
2842 hdev
->link_mode
= ((__u16
) dr
.dev_opt
) &
2843 (HCI_LM_MASTER
| HCI_LM_ACCEPT
);
2847 hdev
->pkt_type
= (__u16
) dr
.dev_opt
;
2851 hdev
->acl_mtu
= *((__u16
*) &dr
.dev_opt
+ 1);
2852 hdev
->acl_pkts
= *((__u16
*) &dr
.dev_opt
+ 0);
2856 hdev
->sco_mtu
= *((__u16
*) &dr
.dev_opt
+ 1);
2857 hdev
->sco_pkts
= *((__u16
*) &dr
.dev_opt
+ 0);
2870 int hci_get_dev_list(void __user
*arg
)
2872 struct hci_dev
*hdev
;
2873 struct hci_dev_list_req
*dl
;
2874 struct hci_dev_req
*dr
;
2875 int n
= 0, size
, err
;
2878 if (get_user(dev_num
, (__u16 __user
*) arg
))
2881 if (!dev_num
|| dev_num
> (PAGE_SIZE
* 2) / sizeof(*dr
))
2884 size
= sizeof(*dl
) + dev_num
* sizeof(*dr
);
2886 dl
= kzalloc(size
, GFP_KERNEL
);
2892 read_lock(&hci_dev_list_lock
);
2893 list_for_each_entry(hdev
, &hci_dev_list
, list
) {
2894 unsigned long flags
= hdev
->flags
;
2896 /* When the auto-off is configured it means the transport
2897 * is running, but in that case still indicate that the
2898 * device is actually down.
2900 if (test_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
))
2901 flags
&= ~BIT(HCI_UP
);
2903 (dr
+ n
)->dev_id
= hdev
->id
;
2904 (dr
+ n
)->dev_opt
= flags
;
2909 read_unlock(&hci_dev_list_lock
);
2912 size
= sizeof(*dl
) + n
* sizeof(*dr
);
2914 err
= copy_to_user(arg
, dl
, size
);
2917 return err
? -EFAULT
: 0;
2920 int hci_get_dev_info(void __user
*arg
)
2922 struct hci_dev
*hdev
;
2923 struct hci_dev_info di
;
2924 unsigned long flags
;
2927 if (copy_from_user(&di
, arg
, sizeof(di
)))
2930 hdev
= hci_dev_get(di
.dev_id
);
2934 /* When the auto-off is configured it means the transport
2935 * is running, but in that case still indicate that the
2936 * device is actually down.
2938 if (test_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
))
2939 flags
= hdev
->flags
& ~BIT(HCI_UP
);
2941 flags
= hdev
->flags
;
2943 strcpy(di
.name
, hdev
->name
);
2944 di
.bdaddr
= hdev
->bdaddr
;
2945 di
.type
= (hdev
->bus
& 0x0f) | ((hdev
->dev_type
& 0x03) << 4);
2947 di
.pkt_type
= hdev
->pkt_type
;
2948 if (lmp_bredr_capable(hdev
)) {
2949 di
.acl_mtu
= hdev
->acl_mtu
;
2950 di
.acl_pkts
= hdev
->acl_pkts
;
2951 di
.sco_mtu
= hdev
->sco_mtu
;
2952 di
.sco_pkts
= hdev
->sco_pkts
;
2954 di
.acl_mtu
= hdev
->le_mtu
;
2955 di
.acl_pkts
= hdev
->le_pkts
;
2959 di
.link_policy
= hdev
->link_policy
;
2960 di
.link_mode
= hdev
->link_mode
;
2962 memcpy(&di
.stat
, &hdev
->stat
, sizeof(di
.stat
));
2963 memcpy(&di
.features
, &hdev
->features
, sizeof(di
.features
));
2965 if (copy_to_user(arg
, &di
, sizeof(di
)))
2973 /* ---- Interface to HCI drivers ---- */
2975 static int hci_rfkill_set_block(void *data
, bool blocked
)
2977 struct hci_dev
*hdev
= data
;
2979 BT_DBG("%p name %s blocked %d", hdev
, hdev
->name
, blocked
);
2981 if (test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
))
2985 set_bit(HCI_RFKILLED
, &hdev
->dev_flags
);
2986 if (!test_bit(HCI_SETUP
, &hdev
->dev_flags
) &&
2987 !test_bit(HCI_CONFIG
, &hdev
->dev_flags
))
2988 hci_dev_do_close(hdev
);
2990 clear_bit(HCI_RFKILLED
, &hdev
->dev_flags
);
2996 static const struct rfkill_ops hci_rfkill_ops
= {
2997 .set_block
= hci_rfkill_set_block
,
3000 static void hci_power_on(struct work_struct
*work
)
3002 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, power_on
);
3005 BT_DBG("%s", hdev
->name
);
3007 err
= hci_dev_do_open(hdev
);
3009 mgmt_set_powered_failed(hdev
, err
);
3013 /* During the HCI setup phase, a few error conditions are
3014 * ignored and they need to be checked now. If they are still
3015 * valid, it is important to turn the device back off.
3017 if (test_bit(HCI_RFKILLED
, &hdev
->dev_flags
) ||
3018 test_bit(HCI_UNCONFIGURED
, &hdev
->dev_flags
) ||
3019 (hdev
->dev_type
== HCI_BREDR
&&
3020 !bacmp(&hdev
->bdaddr
, BDADDR_ANY
) &&
3021 !bacmp(&hdev
->static_addr
, BDADDR_ANY
))) {
3022 clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
);
3023 hci_dev_do_close(hdev
);
3024 } else if (test_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
)) {
3025 queue_delayed_work(hdev
->req_workqueue
, &hdev
->power_off
,
3026 HCI_AUTO_OFF_TIMEOUT
);
3029 if (test_and_clear_bit(HCI_SETUP
, &hdev
->dev_flags
)) {
3030 /* For unconfigured devices, set the HCI_RAW flag
3031 * so that userspace can easily identify them.
3033 if (test_bit(HCI_UNCONFIGURED
, &hdev
->dev_flags
))
3034 set_bit(HCI_RAW
, &hdev
->flags
);
3036 /* For fully configured devices, this will send
3037 * the Index Added event. For unconfigured devices,
3038 * it will send Unconfigued Index Added event.
3040 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
3041 * and no event will be send.
3043 mgmt_index_added(hdev
);
3044 } else if (test_and_clear_bit(HCI_CONFIG
, &hdev
->dev_flags
)) {
3045 /* When the controller is now configured, then it
3046 * is important to clear the HCI_RAW flag.
3048 if (!test_bit(HCI_UNCONFIGURED
, &hdev
->dev_flags
))
3049 clear_bit(HCI_RAW
, &hdev
->flags
);
3051 /* Powering on the controller with HCI_CONFIG set only
3052 * happens with the transition from unconfigured to
3053 * configured. This will send the Index Added event.
3055 mgmt_index_added(hdev
);
3059 static void hci_power_off(struct work_struct
*work
)
3061 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
3064 BT_DBG("%s", hdev
->name
);
3066 hci_dev_do_close(hdev
);
3069 static void hci_discov_off(struct work_struct
*work
)
3071 struct hci_dev
*hdev
;
3073 hdev
= container_of(work
, struct hci_dev
, discov_off
.work
);
3075 BT_DBG("%s", hdev
->name
);
3077 mgmt_discoverable_timeout(hdev
);
3080 void hci_uuids_clear(struct hci_dev
*hdev
)
3082 struct bt_uuid
*uuid
, *tmp
;
3084 list_for_each_entry_safe(uuid
, tmp
, &hdev
->uuids
, list
) {
3085 list_del(&uuid
->list
);
3090 void hci_link_keys_clear(struct hci_dev
*hdev
)
3092 struct list_head
*p
, *n
;
3094 list_for_each_safe(p
, n
, &hdev
->link_keys
) {
3095 struct link_key
*key
;
3097 key
= list_entry(p
, struct link_key
, list
);
3104 void hci_smp_ltks_clear(struct hci_dev
*hdev
)
3108 list_for_each_entry_rcu(k
, &hdev
->long_term_keys
, list
) {
3109 list_del_rcu(&k
->list
);
3114 void hci_smp_irks_clear(struct hci_dev
*hdev
)
3118 list_for_each_entry_rcu(k
, &hdev
->identity_resolving_keys
, list
) {
3119 list_del_rcu(&k
->list
);
3124 struct link_key
*hci_find_link_key(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
3128 list_for_each_entry(k
, &hdev
->link_keys
, list
)
3129 if (bacmp(bdaddr
, &k
->bdaddr
) == 0)
3135 static bool hci_persistent_key(struct hci_dev
*hdev
, struct hci_conn
*conn
,
3136 u8 key_type
, u8 old_key_type
)
3139 if (key_type
< 0x03)
3142 /* Debug keys are insecure so don't store them persistently */
3143 if (key_type
== HCI_LK_DEBUG_COMBINATION
)
3146 /* Changed combination key and there's no previous one */
3147 if (key_type
== HCI_LK_CHANGED_COMBINATION
&& old_key_type
== 0xff)
3150 /* Security mode 3 case */
3154 /* Neither local nor remote side had no-bonding as requirement */
3155 if (conn
->auth_type
> 0x01 && conn
->remote_auth
> 0x01)
3158 /* Local side had dedicated bonding as requirement */
3159 if (conn
->auth_type
== 0x02 || conn
->auth_type
== 0x03)
3162 /* Remote side had dedicated bonding as requirement */
3163 if (conn
->remote_auth
== 0x02 || conn
->remote_auth
== 0x03)
3166 /* If none of the above criteria match, then don't store the key
3171 static u8
ltk_role(u8 type
)
3173 if (type
== SMP_LTK
)
3174 return HCI_ROLE_MASTER
;
3176 return HCI_ROLE_SLAVE
;
3179 struct smp_ltk
*hci_find_ltk(struct hci_dev
*hdev
, __le16 ediv
, __le64 rand
,
3185 list_for_each_entry_rcu(k
, &hdev
->long_term_keys
, list
) {
3186 if (k
->ediv
!= ediv
|| k
->rand
!= rand
)
3189 if (ltk_role(k
->type
) != role
)
3200 struct smp_ltk
*hci_find_ltk_by_addr(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
3201 u8 addr_type
, u8 role
)
3206 list_for_each_entry_rcu(k
, &hdev
->long_term_keys
, list
) {
3207 if (addr_type
== k
->bdaddr_type
&&
3208 bacmp(bdaddr
, &k
->bdaddr
) == 0 &&
3209 ltk_role(k
->type
) == role
) {
3219 struct smp_irk
*hci_find_irk_by_rpa(struct hci_dev
*hdev
, bdaddr_t
*rpa
)
3221 struct smp_irk
*irk
;
3224 list_for_each_entry_rcu(irk
, &hdev
->identity_resolving_keys
, list
) {
3225 if (!bacmp(&irk
->rpa
, rpa
)) {
3231 list_for_each_entry_rcu(irk
, &hdev
->identity_resolving_keys
, list
) {
3232 if (smp_irk_matches(hdev
, irk
->val
, rpa
)) {
3233 bacpy(&irk
->rpa
, rpa
);
3243 struct smp_irk
*hci_find_irk_by_addr(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
3246 struct smp_irk
*irk
;
3248 /* Identity Address must be public or static random */
3249 if (addr_type
== ADDR_LE_DEV_RANDOM
&& (bdaddr
->b
[5] & 0xc0) != 0xc0)
3253 list_for_each_entry_rcu(irk
, &hdev
->identity_resolving_keys
, list
) {
3254 if (addr_type
== irk
->addr_type
&&
3255 bacmp(bdaddr
, &irk
->bdaddr
) == 0) {
3265 struct link_key
*hci_add_link_key(struct hci_dev
*hdev
, struct hci_conn
*conn
,
3266 bdaddr_t
*bdaddr
, u8
*val
, u8 type
,
3267 u8 pin_len
, bool *persistent
)
3269 struct link_key
*key
, *old_key
;
3272 old_key
= hci_find_link_key(hdev
, bdaddr
);
3274 old_key_type
= old_key
->type
;
3277 old_key_type
= conn
? conn
->key_type
: 0xff;
3278 key
= kzalloc(sizeof(*key
), GFP_KERNEL
);
3281 list_add(&key
->list
, &hdev
->link_keys
);
3284 BT_DBG("%s key for %pMR type %u", hdev
->name
, bdaddr
, type
);
3286 /* Some buggy controller combinations generate a changed
3287 * combination key for legacy pairing even when there's no
3289 if (type
== HCI_LK_CHANGED_COMBINATION
&&
3290 (!conn
|| conn
->remote_auth
== 0xff) && old_key_type
== 0xff) {
3291 type
= HCI_LK_COMBINATION
;
3293 conn
->key_type
= type
;
3296 bacpy(&key
->bdaddr
, bdaddr
);
3297 memcpy(key
->val
, val
, HCI_LINK_KEY_SIZE
);
3298 key
->pin_len
= pin_len
;
3300 if (type
== HCI_LK_CHANGED_COMBINATION
)
3301 key
->type
= old_key_type
;
3306 *persistent
= hci_persistent_key(hdev
, conn
, type
,
3312 struct smp_ltk
*hci_add_ltk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
3313 u8 addr_type
, u8 type
, u8 authenticated
,
3314 u8 tk
[16], u8 enc_size
, __le16 ediv
, __le64 rand
)
3316 struct smp_ltk
*key
, *old_key
;
3317 u8 role
= ltk_role(type
);
3319 old_key
= hci_find_ltk_by_addr(hdev
, bdaddr
, addr_type
, role
);
3323 key
= kzalloc(sizeof(*key
), GFP_KERNEL
);
3326 list_add_rcu(&key
->list
, &hdev
->long_term_keys
);
3329 bacpy(&key
->bdaddr
, bdaddr
);
3330 key
->bdaddr_type
= addr_type
;
3331 memcpy(key
->val
, tk
, sizeof(key
->val
));
3332 key
->authenticated
= authenticated
;
3335 key
->enc_size
= enc_size
;
3341 struct smp_irk
*hci_add_irk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
3342 u8 addr_type
, u8 val
[16], bdaddr_t
*rpa
)
3344 struct smp_irk
*irk
;
3346 irk
= hci_find_irk_by_addr(hdev
, bdaddr
, addr_type
);
3348 irk
= kzalloc(sizeof(*irk
), GFP_KERNEL
);
3352 bacpy(&irk
->bdaddr
, bdaddr
);
3353 irk
->addr_type
= addr_type
;
3355 list_add_rcu(&irk
->list
, &hdev
->identity_resolving_keys
);
3358 memcpy(irk
->val
, val
, 16);
3359 bacpy(&irk
->rpa
, rpa
);
3364 int hci_remove_link_key(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
3366 struct link_key
*key
;
3368 key
= hci_find_link_key(hdev
, bdaddr
);
3372 BT_DBG("%s removing %pMR", hdev
->name
, bdaddr
);
3374 list_del(&key
->list
);
3380 int hci_remove_ltk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 bdaddr_type
)
3385 list_for_each_entry_rcu(k
, &hdev
->long_term_keys
, list
) {
3386 if (bacmp(bdaddr
, &k
->bdaddr
) || k
->bdaddr_type
!= bdaddr_type
)
3389 BT_DBG("%s removing %pMR", hdev
->name
, bdaddr
);
3391 list_del_rcu(&k
->list
);
3396 return removed
? 0 : -ENOENT
;
3399 void hci_remove_irk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 addr_type
)
3403 list_for_each_entry_rcu(k
, &hdev
->identity_resolving_keys
, list
) {
3404 if (bacmp(bdaddr
, &k
->bdaddr
) || k
->addr_type
!= addr_type
)
3407 BT_DBG("%s removing %pMR", hdev
->name
, bdaddr
);
3409 list_del_rcu(&k
->list
);
3414 /* HCI command timer function */
3415 static void hci_cmd_timeout(struct work_struct
*work
)
3417 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
3420 if (hdev
->sent_cmd
) {
3421 struct hci_command_hdr
*sent
= (void *) hdev
->sent_cmd
->data
;
3422 u16 opcode
= __le16_to_cpu(sent
->opcode
);
3424 BT_ERR("%s command 0x%4.4x tx timeout", hdev
->name
, opcode
);
3426 BT_ERR("%s command tx timeout", hdev
->name
);
3429 atomic_set(&hdev
->cmd_cnt
, 1);
3430 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
3433 struct oob_data
*hci_find_remote_oob_data(struct hci_dev
*hdev
,
3436 struct oob_data
*data
;
3438 list_for_each_entry(data
, &hdev
->remote_oob_data
, list
)
3439 if (bacmp(bdaddr
, &data
->bdaddr
) == 0)
3445 int hci_remove_remote_oob_data(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
3447 struct oob_data
*data
;
3449 data
= hci_find_remote_oob_data(hdev
, bdaddr
);
3453 BT_DBG("%s removing %pMR", hdev
->name
, bdaddr
);
3455 list_del(&data
->list
);
3461 void hci_remote_oob_data_clear(struct hci_dev
*hdev
)
3463 struct oob_data
*data
, *n
;
3465 list_for_each_entry_safe(data
, n
, &hdev
->remote_oob_data
, list
) {
3466 list_del(&data
->list
);
3471 int hci_add_remote_oob_data(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
3474 struct oob_data
*data
;
3476 data
= hci_find_remote_oob_data(hdev
, bdaddr
);
3478 data
= kmalloc(sizeof(*data
), GFP_KERNEL
);
3482 bacpy(&data
->bdaddr
, bdaddr
);
3483 list_add(&data
->list
, &hdev
->remote_oob_data
);
3486 memcpy(data
->hash192
, hash
, sizeof(data
->hash192
));
3487 memcpy(data
->rand192
, rand
, sizeof(data
->rand192
));
3489 memset(data
->hash256
, 0, sizeof(data
->hash256
));
3490 memset(data
->rand256
, 0, sizeof(data
->rand256
));
3492 BT_DBG("%s for %pMR", hdev
->name
, bdaddr
);
3497 int hci_add_remote_oob_ext_data(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
3498 u8
*hash192
, u8
*rand192
,
3499 u8
*hash256
, u8
*rand256
)
3501 struct oob_data
*data
;
3503 data
= hci_find_remote_oob_data(hdev
, bdaddr
);
3505 data
= kmalloc(sizeof(*data
), GFP_KERNEL
);
3509 bacpy(&data
->bdaddr
, bdaddr
);
3510 list_add(&data
->list
, &hdev
->remote_oob_data
);
3513 memcpy(data
->hash192
, hash192
, sizeof(data
->hash192
));
3514 memcpy(data
->rand192
, rand192
, sizeof(data
->rand192
));
3516 memcpy(data
->hash256
, hash256
, sizeof(data
->hash256
));
3517 memcpy(data
->rand256
, rand256
, sizeof(data
->rand256
));
3519 BT_DBG("%s for %pMR", hdev
->name
, bdaddr
);
3524 struct bdaddr_list
*hci_bdaddr_list_lookup(struct list_head
*bdaddr_list
,
3525 bdaddr_t
*bdaddr
, u8 type
)
3527 struct bdaddr_list
*b
;
3529 list_for_each_entry(b
, bdaddr_list
, list
) {
3530 if (!bacmp(&b
->bdaddr
, bdaddr
) && b
->bdaddr_type
== type
)
3537 void hci_bdaddr_list_clear(struct list_head
*bdaddr_list
)
3539 struct list_head
*p
, *n
;
3541 list_for_each_safe(p
, n
, bdaddr_list
) {
3542 struct bdaddr_list
*b
= list_entry(p
, struct bdaddr_list
, list
);
3549 int hci_bdaddr_list_add(struct list_head
*list
, bdaddr_t
*bdaddr
, u8 type
)
3551 struct bdaddr_list
*entry
;
3553 if (!bacmp(bdaddr
, BDADDR_ANY
))
3556 if (hci_bdaddr_list_lookup(list
, bdaddr
, type
))
3559 entry
= kzalloc(sizeof(*entry
), GFP_KERNEL
);
3563 bacpy(&entry
->bdaddr
, bdaddr
);
3564 entry
->bdaddr_type
= type
;
3566 list_add(&entry
->list
, list
);
3571 int hci_bdaddr_list_del(struct list_head
*list
, bdaddr_t
*bdaddr
, u8 type
)
3573 struct bdaddr_list
*entry
;
3575 if (!bacmp(bdaddr
, BDADDR_ANY
)) {
3576 hci_bdaddr_list_clear(list
);
3580 entry
= hci_bdaddr_list_lookup(list
, bdaddr
, type
);
3584 list_del(&entry
->list
);
3590 /* This function requires the caller holds hdev->lock */
3591 struct hci_conn_params
*hci_conn_params_lookup(struct hci_dev
*hdev
,
3592 bdaddr_t
*addr
, u8 addr_type
)
3594 struct hci_conn_params
*params
;
3596 /* The conn params list only contains identity addresses */
3597 if (!hci_is_identity_address(addr
, addr_type
))
3600 list_for_each_entry(params
, &hdev
->le_conn_params
, list
) {
3601 if (bacmp(¶ms
->addr
, addr
) == 0 &&
3602 params
->addr_type
== addr_type
) {
3610 static bool is_connected(struct hci_dev
*hdev
, bdaddr_t
*addr
, u8 type
)
3612 struct hci_conn
*conn
;
3614 conn
= hci_conn_hash_lookup_ba(hdev
, LE_LINK
, addr
);
3618 if (conn
->dst_type
!= type
)
3621 if (conn
->state
!= BT_CONNECTED
)
3627 /* This function requires the caller holds hdev->lock */
3628 struct hci_conn_params
*hci_pend_le_action_lookup(struct list_head
*list
,
3629 bdaddr_t
*addr
, u8 addr_type
)
3631 struct hci_conn_params
*param
;
3633 /* The list only contains identity addresses */
3634 if (!hci_is_identity_address(addr
, addr_type
))
3637 list_for_each_entry(param
, list
, action
) {
3638 if (bacmp(¶m
->addr
, addr
) == 0 &&
3639 param
->addr_type
== addr_type
)
3646 /* This function requires the caller holds hdev->lock */
3647 struct hci_conn_params
*hci_conn_params_add(struct hci_dev
*hdev
,
3648 bdaddr_t
*addr
, u8 addr_type
)
3650 struct hci_conn_params
*params
;
3652 if (!hci_is_identity_address(addr
, addr_type
))
3655 params
= hci_conn_params_lookup(hdev
, addr
, addr_type
);
3659 params
= kzalloc(sizeof(*params
), GFP_KERNEL
);
3661 BT_ERR("Out of memory");
3665 bacpy(¶ms
->addr
, addr
);
3666 params
->addr_type
= addr_type
;
3668 list_add(¶ms
->list
, &hdev
->le_conn_params
);
3669 INIT_LIST_HEAD(¶ms
->action
);
3671 params
->conn_min_interval
= hdev
->le_conn_min_interval
;
3672 params
->conn_max_interval
= hdev
->le_conn_max_interval
;
3673 params
->conn_latency
= hdev
->le_conn_latency
;
3674 params
->supervision_timeout
= hdev
->le_supv_timeout
;
3675 params
->auto_connect
= HCI_AUTO_CONN_DISABLED
;
3677 BT_DBG("addr %pMR (type %u)", addr
, addr_type
);
3682 /* This function requires the caller holds hdev->lock */
3683 int hci_conn_params_set(struct hci_dev
*hdev
, bdaddr_t
*addr
, u8 addr_type
,
3686 struct hci_conn_params
*params
;
3688 params
= hci_conn_params_add(hdev
, addr
, addr_type
);
3692 if (params
->auto_connect
== auto_connect
)
3695 list_del_init(¶ms
->action
);
3697 switch (auto_connect
) {
3698 case HCI_AUTO_CONN_DISABLED
:
3699 case HCI_AUTO_CONN_LINK_LOSS
:
3700 hci_update_background_scan(hdev
);
3702 case HCI_AUTO_CONN_REPORT
:
3703 list_add(¶ms
->action
, &hdev
->pend_le_reports
);
3704 hci_update_background_scan(hdev
);
3706 case HCI_AUTO_CONN_DIRECT
:
3707 case HCI_AUTO_CONN_ALWAYS
:
3708 if (!is_connected(hdev
, addr
, addr_type
)) {
3709 list_add(¶ms
->action
, &hdev
->pend_le_conns
);
3710 hci_update_background_scan(hdev
);
3715 params
->auto_connect
= auto_connect
;
3717 BT_DBG("addr %pMR (type %u) auto_connect %u", addr
, addr_type
,
3723 static void hci_conn_params_free(struct hci_conn_params
*params
)
3726 hci_conn_drop(params
->conn
);
3727 hci_conn_put(params
->conn
);
3730 list_del(¶ms
->action
);
3731 list_del(¶ms
->list
);
3735 /* This function requires the caller holds hdev->lock */
3736 void hci_conn_params_del(struct hci_dev
*hdev
, bdaddr_t
*addr
, u8 addr_type
)
3738 struct hci_conn_params
*params
;
3740 params
= hci_conn_params_lookup(hdev
, addr
, addr_type
);
3744 hci_conn_params_free(params
);
3746 hci_update_background_scan(hdev
);
3748 BT_DBG("addr %pMR (type %u)", addr
, addr_type
);
3751 /* This function requires the caller holds hdev->lock */
3752 void hci_conn_params_clear_disabled(struct hci_dev
*hdev
)
3754 struct hci_conn_params
*params
, *tmp
;
3756 list_for_each_entry_safe(params
, tmp
, &hdev
->le_conn_params
, list
) {
3757 if (params
->auto_connect
!= HCI_AUTO_CONN_DISABLED
)
3759 list_del(¶ms
->list
);
3763 BT_DBG("All LE disabled connection parameters were removed");
3766 /* This function requires the caller holds hdev->lock */
3767 void hci_conn_params_clear_all(struct hci_dev
*hdev
)
3769 struct hci_conn_params
*params
, *tmp
;
3771 list_for_each_entry_safe(params
, tmp
, &hdev
->le_conn_params
, list
)
3772 hci_conn_params_free(params
);
3774 hci_update_background_scan(hdev
);
3776 BT_DBG("All LE connection parameters were removed");
3779 static void inquiry_complete(struct hci_dev
*hdev
, u8 status
)
3782 BT_ERR("Failed to start inquiry: status %d", status
);
3785 hci_discovery_set_state(hdev
, DISCOVERY_STOPPED
);
3786 hci_dev_unlock(hdev
);
3791 static void le_scan_disable_work_complete(struct hci_dev
*hdev
, u8 status
)
3793 /* General inquiry access code (GIAC) */
3794 u8 lap
[3] = { 0x33, 0x8b, 0x9e };
3795 struct hci_request req
;
3796 struct hci_cp_inquiry cp
;
3800 BT_ERR("Failed to disable LE scanning: status %d", status
);
3804 switch (hdev
->discovery
.type
) {
3805 case DISCOV_TYPE_LE
:
3807 hci_discovery_set_state(hdev
, DISCOVERY_STOPPED
);
3808 hci_dev_unlock(hdev
);
3811 case DISCOV_TYPE_INTERLEAVED
:
3812 hci_req_init(&req
, hdev
);
3814 memset(&cp
, 0, sizeof(cp
));
3815 memcpy(&cp
.lap
, lap
, sizeof(cp
.lap
));
3816 cp
.length
= DISCOV_INTERLEAVED_INQUIRY_LEN
;
3817 hci_req_add(&req
, HCI_OP_INQUIRY
, sizeof(cp
), &cp
);
3821 hci_inquiry_cache_flush(hdev
);
3823 err
= hci_req_run(&req
, inquiry_complete
);
3825 BT_ERR("Inquiry request failed: err %d", err
);
3826 hci_discovery_set_state(hdev
, DISCOVERY_STOPPED
);
3829 hci_dev_unlock(hdev
);
3834 static void le_scan_disable_work(struct work_struct
*work
)
3836 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
3837 le_scan_disable
.work
);
3838 struct hci_request req
;
3841 BT_DBG("%s", hdev
->name
);
3843 hci_req_init(&req
, hdev
);
3845 hci_req_add_le_scan_disable(&req
);
3847 err
= hci_req_run(&req
, le_scan_disable_work_complete
);
3849 BT_ERR("Disable LE scanning request failed: err %d", err
);
3852 static void set_random_addr(struct hci_request
*req
, bdaddr_t
*rpa
)
3854 struct hci_dev
*hdev
= req
->hdev
;
3856 /* If we're advertising or initiating an LE connection we can't
3857 * go ahead and change the random address at this time. This is
3858 * because the eventual initiator address used for the
3859 * subsequently created connection will be undefined (some
3860 * controllers use the new address and others the one we had
3861 * when the operation started).
3863 * In this kind of scenario skip the update and let the random
3864 * address be updated at the next cycle.
3866 if (test_bit(HCI_LE_ADV
, &hdev
->dev_flags
) ||
3867 hci_conn_hash_lookup_state(hdev
, LE_LINK
, BT_CONNECT
)) {
3868 BT_DBG("Deferring random address update");
3869 set_bit(HCI_RPA_EXPIRED
, &hdev
->dev_flags
);
3873 hci_req_add(req
, HCI_OP_LE_SET_RANDOM_ADDR
, 6, rpa
);
3876 int hci_update_random_address(struct hci_request
*req
, bool require_privacy
,
3879 struct hci_dev
*hdev
= req
->hdev
;
3882 /* If privacy is enabled use a resolvable private address. If
3883 * current RPA has expired or there is something else than
3884 * the current RPA in use, then generate a new one.
3886 if (test_bit(HCI_PRIVACY
, &hdev
->dev_flags
)) {
3889 *own_addr_type
= ADDR_LE_DEV_RANDOM
;
3891 if (!test_and_clear_bit(HCI_RPA_EXPIRED
, &hdev
->dev_flags
) &&
3892 !bacmp(&hdev
->random_addr
, &hdev
->rpa
))
3895 err
= smp_generate_rpa(hdev
, hdev
->irk
, &hdev
->rpa
);
3897 BT_ERR("%s failed to generate new RPA", hdev
->name
);
3901 set_random_addr(req
, &hdev
->rpa
);
3903 to
= msecs_to_jiffies(hdev
->rpa_timeout
* 1000);
3904 queue_delayed_work(hdev
->workqueue
, &hdev
->rpa_expired
, to
);
3909 /* In case of required privacy without resolvable private address,
3910 * use an unresolvable private address. This is useful for active
3911 * scanning and non-connectable advertising.
3913 if (require_privacy
) {
3916 get_random_bytes(&urpa
, 6);
3917 urpa
.b
[5] &= 0x3f; /* Clear two most significant bits */
3919 *own_addr_type
= ADDR_LE_DEV_RANDOM
;
3920 set_random_addr(req
, &urpa
);
3924 /* If forcing static address is in use or there is no public
3925 * address use the static address as random address (but skip
3926 * the HCI command if the current random address is already the
3929 if (test_bit(HCI_FORCE_STATIC_ADDR
, &hdev
->dbg_flags
) ||
3930 !bacmp(&hdev
->bdaddr
, BDADDR_ANY
)) {
3931 *own_addr_type
= ADDR_LE_DEV_RANDOM
;
3932 if (bacmp(&hdev
->static_addr
, &hdev
->random_addr
))
3933 hci_req_add(req
, HCI_OP_LE_SET_RANDOM_ADDR
, 6,
3934 &hdev
->static_addr
);
3938 /* Neither privacy nor static address is being used so use a
3941 *own_addr_type
= ADDR_LE_DEV_PUBLIC
;
3946 /* Copy the Identity Address of the controller.
3948 * If the controller has a public BD_ADDR, then by default use that one.
3949 * If this is a LE only controller without a public address, default to
3950 * the static random address.
3952 * For debugging purposes it is possible to force controllers with a
3953 * public address to use the static random address instead.
3955 void hci_copy_identity_address(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
3958 if (test_bit(HCI_FORCE_STATIC_ADDR
, &hdev
->dbg_flags
) ||
3959 !bacmp(&hdev
->bdaddr
, BDADDR_ANY
)) {
3960 bacpy(bdaddr
, &hdev
->static_addr
);
3961 *bdaddr_type
= ADDR_LE_DEV_RANDOM
;
3963 bacpy(bdaddr
, &hdev
->bdaddr
);
3964 *bdaddr_type
= ADDR_LE_DEV_PUBLIC
;
3968 /* Alloc HCI device */
3969 struct hci_dev
*hci_alloc_dev(void)
3971 struct hci_dev
*hdev
;
3973 hdev
= kzalloc(sizeof(*hdev
), GFP_KERNEL
);
3977 hdev
->pkt_type
= (HCI_DM1
| HCI_DH1
| HCI_HV1
);
3978 hdev
->esco_type
= (ESCO_HV1
);
3979 hdev
->link_mode
= (HCI_LM_ACCEPT
);
3980 hdev
->num_iac
= 0x01; /* One IAC support is mandatory */
3981 hdev
->io_capability
= 0x03; /* No Input No Output */
3982 hdev
->manufacturer
= 0xffff; /* Default to internal use */
3983 hdev
->inq_tx_power
= HCI_TX_POWER_INVALID
;
3984 hdev
->adv_tx_power
= HCI_TX_POWER_INVALID
;
3986 hdev
->sniff_max_interval
= 800;
3987 hdev
->sniff_min_interval
= 80;
3989 hdev
->le_adv_channel_map
= 0x07;
3990 hdev
->le_adv_min_interval
= 0x0800;
3991 hdev
->le_adv_max_interval
= 0x0800;
3992 hdev
->le_scan_interval
= 0x0060;
3993 hdev
->le_scan_window
= 0x0030;
3994 hdev
->le_conn_min_interval
= 0x0028;
3995 hdev
->le_conn_max_interval
= 0x0038;
3996 hdev
->le_conn_latency
= 0x0000;
3997 hdev
->le_supv_timeout
= 0x002a;
3999 hdev
->rpa_timeout
= HCI_DEFAULT_RPA_TIMEOUT
;
4000 hdev
->discov_interleaved_timeout
= DISCOV_INTERLEAVED_TIMEOUT
;
4001 hdev
->conn_info_min_age
= DEFAULT_CONN_INFO_MIN_AGE
;
4002 hdev
->conn_info_max_age
= DEFAULT_CONN_INFO_MAX_AGE
;
4004 mutex_init(&hdev
->lock
);
4005 mutex_init(&hdev
->req_lock
);
4007 INIT_LIST_HEAD(&hdev
->mgmt_pending
);
4008 INIT_LIST_HEAD(&hdev
->blacklist
);
4009 INIT_LIST_HEAD(&hdev
->whitelist
);
4010 INIT_LIST_HEAD(&hdev
->uuids
);
4011 INIT_LIST_HEAD(&hdev
->link_keys
);
4012 INIT_LIST_HEAD(&hdev
->long_term_keys
);
4013 INIT_LIST_HEAD(&hdev
->identity_resolving_keys
);
4014 INIT_LIST_HEAD(&hdev
->remote_oob_data
);
4015 INIT_LIST_HEAD(&hdev
->le_white_list
);
4016 INIT_LIST_HEAD(&hdev
->le_conn_params
);
4017 INIT_LIST_HEAD(&hdev
->pend_le_conns
);
4018 INIT_LIST_HEAD(&hdev
->pend_le_reports
);
4019 INIT_LIST_HEAD(&hdev
->conn_hash
.list
);
4021 INIT_WORK(&hdev
->rx_work
, hci_rx_work
);
4022 INIT_WORK(&hdev
->cmd_work
, hci_cmd_work
);
4023 INIT_WORK(&hdev
->tx_work
, hci_tx_work
);
4024 INIT_WORK(&hdev
->power_on
, hci_power_on
);
4026 INIT_DELAYED_WORK(&hdev
->power_off
, hci_power_off
);
4027 INIT_DELAYED_WORK(&hdev
->discov_off
, hci_discov_off
);
4028 INIT_DELAYED_WORK(&hdev
->le_scan_disable
, le_scan_disable_work
);
4030 skb_queue_head_init(&hdev
->rx_q
);
4031 skb_queue_head_init(&hdev
->cmd_q
);
4032 skb_queue_head_init(&hdev
->raw_q
);
4034 init_waitqueue_head(&hdev
->req_wait_q
);
4036 INIT_DELAYED_WORK(&hdev
->cmd_timer
, hci_cmd_timeout
);
4038 hci_init_sysfs(hdev
);
4039 discovery_init(hdev
);
4043 EXPORT_SYMBOL(hci_alloc_dev
);
4045 /* Free HCI device */
4046 void hci_free_dev(struct hci_dev
*hdev
)
4048 /* will free via device release */
4049 put_device(&hdev
->dev
);
4051 EXPORT_SYMBOL(hci_free_dev
);
4053 /* Register HCI device */
4054 int hci_register_dev(struct hci_dev
*hdev
)
4058 if (!hdev
->open
|| !hdev
->close
|| !hdev
->send
)
4061 /* Do not allow HCI_AMP devices to register at index 0,
4062 * so the index can be used as the AMP controller ID.
4064 switch (hdev
->dev_type
) {
4066 id
= ida_simple_get(&hci_index_ida
, 0, 0, GFP_KERNEL
);
4069 id
= ida_simple_get(&hci_index_ida
, 1, 0, GFP_KERNEL
);
4078 sprintf(hdev
->name
, "hci%d", id
);
4081 BT_DBG("%p name %s bus %d", hdev
, hdev
->name
, hdev
->bus
);
4083 hdev
->workqueue
= alloc_workqueue("%s", WQ_HIGHPRI
| WQ_UNBOUND
|
4084 WQ_MEM_RECLAIM
, 1, hdev
->name
);
4085 if (!hdev
->workqueue
) {
4090 hdev
->req_workqueue
= alloc_workqueue("%s", WQ_HIGHPRI
| WQ_UNBOUND
|
4091 WQ_MEM_RECLAIM
, 1, hdev
->name
);
4092 if (!hdev
->req_workqueue
) {
4093 destroy_workqueue(hdev
->workqueue
);
4098 if (!IS_ERR_OR_NULL(bt_debugfs
))
4099 hdev
->debugfs
= debugfs_create_dir(hdev
->name
, bt_debugfs
);
4101 dev_set_name(&hdev
->dev
, "%s", hdev
->name
);
4103 error
= device_add(&hdev
->dev
);
4107 hdev
->rfkill
= rfkill_alloc(hdev
->name
, &hdev
->dev
,
4108 RFKILL_TYPE_BLUETOOTH
, &hci_rfkill_ops
,
4111 if (rfkill_register(hdev
->rfkill
) < 0) {
4112 rfkill_destroy(hdev
->rfkill
);
4113 hdev
->rfkill
= NULL
;
4117 if (hdev
->rfkill
&& rfkill_blocked(hdev
->rfkill
))
4118 set_bit(HCI_RFKILLED
, &hdev
->dev_flags
);
4120 set_bit(HCI_SETUP
, &hdev
->dev_flags
);
4121 set_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
);
4123 if (hdev
->dev_type
== HCI_BREDR
) {
4124 /* Assume BR/EDR support until proven otherwise (such as
4125 * through reading supported features during init.
4127 set_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
);
4130 write_lock(&hci_dev_list_lock
);
4131 list_add(&hdev
->list
, &hci_dev_list
);
4132 write_unlock(&hci_dev_list_lock
);
4134 /* Devices that are marked for raw-only usage are unconfigured
4135 * and should not be included in normal operation.
4137 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &hdev
->quirks
))
4138 set_bit(HCI_UNCONFIGURED
, &hdev
->dev_flags
);
4140 hci_notify(hdev
, HCI_DEV_REG
);
4143 queue_work(hdev
->req_workqueue
, &hdev
->power_on
);
4148 destroy_workqueue(hdev
->workqueue
);
4149 destroy_workqueue(hdev
->req_workqueue
);
4151 ida_simple_remove(&hci_index_ida
, hdev
->id
);
4155 EXPORT_SYMBOL(hci_register_dev
);
4157 /* Unregister HCI device */
4158 void hci_unregister_dev(struct hci_dev
*hdev
)
4162 BT_DBG("%p name %s bus %d", hdev
, hdev
->name
, hdev
->bus
);
4164 set_bit(HCI_UNREGISTER
, &hdev
->dev_flags
);
4168 write_lock(&hci_dev_list_lock
);
4169 list_del(&hdev
->list
);
4170 write_unlock(&hci_dev_list_lock
);
4172 hci_dev_do_close(hdev
);
4174 for (i
= 0; i
< NUM_REASSEMBLY
; i
++)
4175 kfree_skb(hdev
->reassembly
[i
]);
4177 cancel_work_sync(&hdev
->power_on
);
4179 if (!test_bit(HCI_INIT
, &hdev
->flags
) &&
4180 !test_bit(HCI_SETUP
, &hdev
->dev_flags
) &&
4181 !test_bit(HCI_CONFIG
, &hdev
->dev_flags
)) {
4183 mgmt_index_removed(hdev
);
4184 hci_dev_unlock(hdev
);
4187 /* mgmt_index_removed should take care of emptying the
4189 BUG_ON(!list_empty(&hdev
->mgmt_pending
));
4191 hci_notify(hdev
, HCI_DEV_UNREG
);
4194 rfkill_unregister(hdev
->rfkill
);
4195 rfkill_destroy(hdev
->rfkill
);
4198 smp_unregister(hdev
);
4200 device_del(&hdev
->dev
);
4202 debugfs_remove_recursive(hdev
->debugfs
);
4204 destroy_workqueue(hdev
->workqueue
);
4205 destroy_workqueue(hdev
->req_workqueue
);
4208 hci_bdaddr_list_clear(&hdev
->blacklist
);
4209 hci_bdaddr_list_clear(&hdev
->whitelist
);
4210 hci_uuids_clear(hdev
);
4211 hci_link_keys_clear(hdev
);
4212 hci_smp_ltks_clear(hdev
);
4213 hci_smp_irks_clear(hdev
);
4214 hci_remote_oob_data_clear(hdev
);
4215 hci_bdaddr_list_clear(&hdev
->le_white_list
);
4216 hci_conn_params_clear_all(hdev
);
4217 hci_dev_unlock(hdev
);
4221 ida_simple_remove(&hci_index_ida
, id
);
4223 EXPORT_SYMBOL(hci_unregister_dev
);
4225 /* Suspend HCI device */
4226 int hci_suspend_dev(struct hci_dev
*hdev
)
4228 hci_notify(hdev
, HCI_DEV_SUSPEND
);
4231 EXPORT_SYMBOL(hci_suspend_dev
);
4233 /* Resume HCI device */
4234 int hci_resume_dev(struct hci_dev
*hdev
)
4236 hci_notify(hdev
, HCI_DEV_RESUME
);
4239 EXPORT_SYMBOL(hci_resume_dev
);
4241 /* Reset HCI device */
4242 int hci_reset_dev(struct hci_dev
*hdev
)
4244 const u8 hw_err
[] = { HCI_EV_HARDWARE_ERROR
, 0x01, 0x00 };
4245 struct sk_buff
*skb
;
4247 skb
= bt_skb_alloc(3, GFP_ATOMIC
);
4251 bt_cb(skb
)->pkt_type
= HCI_EVENT_PKT
;
4252 memcpy(skb_put(skb
, 3), hw_err
, 3);
4254 /* Send Hardware Error to upper stack */
4255 return hci_recv_frame(hdev
, skb
);
4257 EXPORT_SYMBOL(hci_reset_dev
);
4259 /* Receive frame from HCI drivers */
4260 int hci_recv_frame(struct hci_dev
*hdev
, struct sk_buff
*skb
)
4262 if (!hdev
|| (!test_bit(HCI_UP
, &hdev
->flags
)
4263 && !test_bit(HCI_INIT
, &hdev
->flags
))) {
4269 bt_cb(skb
)->incoming
= 1;
4272 __net_timestamp(skb
);
4274 skb_queue_tail(&hdev
->rx_q
, skb
);
4275 queue_work(hdev
->workqueue
, &hdev
->rx_work
);
4279 EXPORT_SYMBOL(hci_recv_frame
);
4281 static int hci_reassembly(struct hci_dev
*hdev
, int type
, void *data
,
4282 int count
, __u8 index
)
4287 struct sk_buff
*skb
;
4288 struct bt_skb_cb
*scb
;
4290 if ((type
< HCI_ACLDATA_PKT
|| type
> HCI_EVENT_PKT
) ||
4291 index
>= NUM_REASSEMBLY
)
4294 skb
= hdev
->reassembly
[index
];
4298 case HCI_ACLDATA_PKT
:
4299 len
= HCI_MAX_FRAME_SIZE
;
4300 hlen
= HCI_ACL_HDR_SIZE
;
4303 len
= HCI_MAX_EVENT_SIZE
;
4304 hlen
= HCI_EVENT_HDR_SIZE
;
4306 case HCI_SCODATA_PKT
:
4307 len
= HCI_MAX_SCO_SIZE
;
4308 hlen
= HCI_SCO_HDR_SIZE
;
4312 skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
4316 scb
= (void *) skb
->cb
;
4318 scb
->pkt_type
= type
;
4320 hdev
->reassembly
[index
] = skb
;
4324 scb
= (void *) skb
->cb
;
4325 len
= min_t(uint
, scb
->expect
, count
);
4327 memcpy(skb_put(skb
, len
), data
, len
);
4336 if (skb
->len
== HCI_EVENT_HDR_SIZE
) {
4337 struct hci_event_hdr
*h
= hci_event_hdr(skb
);
4338 scb
->expect
= h
->plen
;
4340 if (skb_tailroom(skb
) < scb
->expect
) {
4342 hdev
->reassembly
[index
] = NULL
;
4348 case HCI_ACLDATA_PKT
:
4349 if (skb
->len
== HCI_ACL_HDR_SIZE
) {
4350 struct hci_acl_hdr
*h
= hci_acl_hdr(skb
);
4351 scb
->expect
= __le16_to_cpu(h
->dlen
);
4353 if (skb_tailroom(skb
) < scb
->expect
) {
4355 hdev
->reassembly
[index
] = NULL
;
4361 case HCI_SCODATA_PKT
:
4362 if (skb
->len
== HCI_SCO_HDR_SIZE
) {
4363 struct hci_sco_hdr
*h
= hci_sco_hdr(skb
);
4364 scb
->expect
= h
->dlen
;
4366 if (skb_tailroom(skb
) < scb
->expect
) {
4368 hdev
->reassembly
[index
] = NULL
;
4375 if (scb
->expect
== 0) {
4376 /* Complete frame */
4378 bt_cb(skb
)->pkt_type
= type
;
4379 hci_recv_frame(hdev
, skb
);
4381 hdev
->reassembly
[index
] = NULL
;
4389 #define STREAM_REASSEMBLY 0
4391 int hci_recv_stream_fragment(struct hci_dev
*hdev
, void *data
, int count
)
4397 struct sk_buff
*skb
= hdev
->reassembly
[STREAM_REASSEMBLY
];
4400 struct { char type
; } *pkt
;
4402 /* Start of the frame */
4409 type
= bt_cb(skb
)->pkt_type
;
4411 rem
= hci_reassembly(hdev
, type
, data
, count
,
4416 data
+= (count
- rem
);
4422 EXPORT_SYMBOL(hci_recv_stream_fragment
);
4424 /* ---- Interface to upper protocols ---- */
4426 int hci_register_cb(struct hci_cb
*cb
)
4428 BT_DBG("%p name %s", cb
, cb
->name
);
4430 write_lock(&hci_cb_list_lock
);
4431 list_add(&cb
->list
, &hci_cb_list
);
4432 write_unlock(&hci_cb_list_lock
);
4436 EXPORT_SYMBOL(hci_register_cb
);
4438 int hci_unregister_cb(struct hci_cb
*cb
)
4440 BT_DBG("%p name %s", cb
, cb
->name
);
4442 write_lock(&hci_cb_list_lock
);
4443 list_del(&cb
->list
);
4444 write_unlock(&hci_cb_list_lock
);
4448 EXPORT_SYMBOL(hci_unregister_cb
);
4450 static void hci_send_frame(struct hci_dev
*hdev
, struct sk_buff
*skb
)
4454 BT_DBG("%s type %d len %d", hdev
->name
, bt_cb(skb
)->pkt_type
, skb
->len
);
4457 __net_timestamp(skb
);
4459 /* Send copy to monitor */
4460 hci_send_to_monitor(hdev
, skb
);
4462 if (atomic_read(&hdev
->promisc
)) {
4463 /* Send copy to the sockets */
4464 hci_send_to_sock(hdev
, skb
);
4467 /* Get rid of skb owner, prior to sending to the driver. */
4470 err
= hdev
->send(hdev
, skb
);
4472 BT_ERR("%s sending frame failed (%d)", hdev
->name
, err
);
4477 void hci_req_init(struct hci_request
*req
, struct hci_dev
*hdev
)
4479 skb_queue_head_init(&req
->cmd_q
);
4484 int hci_req_run(struct hci_request
*req
, hci_req_complete_t complete
)
4486 struct hci_dev
*hdev
= req
->hdev
;
4487 struct sk_buff
*skb
;
4488 unsigned long flags
;
4490 BT_DBG("length %u", skb_queue_len(&req
->cmd_q
));
4492 /* If an error occurred during request building, remove all HCI
4493 * commands queued on the HCI request queue.
4496 skb_queue_purge(&req
->cmd_q
);
4500 /* Do not allow empty requests */
4501 if (skb_queue_empty(&req
->cmd_q
))
4504 skb
= skb_peek_tail(&req
->cmd_q
);
4505 bt_cb(skb
)->req
.complete
= complete
;
4507 spin_lock_irqsave(&hdev
->cmd_q
.lock
, flags
);
4508 skb_queue_splice_tail(&req
->cmd_q
, &hdev
->cmd_q
);
4509 spin_unlock_irqrestore(&hdev
->cmd_q
.lock
, flags
);
4511 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
4516 bool hci_req_pending(struct hci_dev
*hdev
)
4518 return (hdev
->req_status
== HCI_REQ_PEND
);
4521 static struct sk_buff
*hci_prepare_cmd(struct hci_dev
*hdev
, u16 opcode
,
4522 u32 plen
, const void *param
)
4524 int len
= HCI_COMMAND_HDR_SIZE
+ plen
;
4525 struct hci_command_hdr
*hdr
;
4526 struct sk_buff
*skb
;
4528 skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
4532 hdr
= (struct hci_command_hdr
*) skb_put(skb
, HCI_COMMAND_HDR_SIZE
);
4533 hdr
->opcode
= cpu_to_le16(opcode
);
4537 memcpy(skb_put(skb
, plen
), param
, plen
);
4539 BT_DBG("skb len %d", skb
->len
);
4541 bt_cb(skb
)->pkt_type
= HCI_COMMAND_PKT
;
4542 bt_cb(skb
)->opcode
= opcode
;
4547 /* Send HCI command */
4548 int hci_send_cmd(struct hci_dev
*hdev
, __u16 opcode
, __u32 plen
,
4551 struct sk_buff
*skb
;
4553 BT_DBG("%s opcode 0x%4.4x plen %d", hdev
->name
, opcode
, plen
);
4555 skb
= hci_prepare_cmd(hdev
, opcode
, plen
, param
);
4557 BT_ERR("%s no memory for command", hdev
->name
);
4561 /* Stand-alone HCI commands must be flagged as
4562 * single-command requests.
4564 bt_cb(skb
)->req
.start
= true;
4566 skb_queue_tail(&hdev
->cmd_q
, skb
);
4567 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
4572 /* Queue a command to an asynchronous HCI request */
4573 void hci_req_add_ev(struct hci_request
*req
, u16 opcode
, u32 plen
,
4574 const void *param
, u8 event
)
4576 struct hci_dev
*hdev
= req
->hdev
;
4577 struct sk_buff
*skb
;
4579 BT_DBG("%s opcode 0x%4.4x plen %d", hdev
->name
, opcode
, plen
);
4581 /* If an error occurred during request building, there is no point in
4582 * queueing the HCI command. We can simply return.
4587 skb
= hci_prepare_cmd(hdev
, opcode
, plen
, param
);
4589 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4590 hdev
->name
, opcode
);
4595 if (skb_queue_empty(&req
->cmd_q
))
4596 bt_cb(skb
)->req
.start
= true;
4598 bt_cb(skb
)->req
.event
= event
;
4600 skb_queue_tail(&req
->cmd_q
, skb
);
4603 void hci_req_add(struct hci_request
*req
, u16 opcode
, u32 plen
,
4606 hci_req_add_ev(req
, opcode
, plen
, param
, 0);
4609 /* Get data from the previously sent command */
4610 void *hci_sent_cmd_data(struct hci_dev
*hdev
, __u16 opcode
)
4612 struct hci_command_hdr
*hdr
;
4614 if (!hdev
->sent_cmd
)
4617 hdr
= (void *) hdev
->sent_cmd
->data
;
4619 if (hdr
->opcode
!= cpu_to_le16(opcode
))
4622 BT_DBG("%s opcode 0x%4.4x", hdev
->name
, opcode
);
4624 return hdev
->sent_cmd
->data
+ HCI_COMMAND_HDR_SIZE
;
4628 static void hci_add_acl_hdr(struct sk_buff
*skb
, __u16 handle
, __u16 flags
)
4630 struct hci_acl_hdr
*hdr
;
4633 skb_push(skb
, HCI_ACL_HDR_SIZE
);
4634 skb_reset_transport_header(skb
);
4635 hdr
= (struct hci_acl_hdr
*)skb_transport_header(skb
);
4636 hdr
->handle
= cpu_to_le16(hci_handle_pack(handle
, flags
));
4637 hdr
->dlen
= cpu_to_le16(len
);
4640 static void hci_queue_acl(struct hci_chan
*chan
, struct sk_buff_head
*queue
,
4641 struct sk_buff
*skb
, __u16 flags
)
4643 struct hci_conn
*conn
= chan
->conn
;
4644 struct hci_dev
*hdev
= conn
->hdev
;
4645 struct sk_buff
*list
;
4647 skb
->len
= skb_headlen(skb
);
4650 bt_cb(skb
)->pkt_type
= HCI_ACLDATA_PKT
;
4652 switch (hdev
->dev_type
) {
4654 hci_add_acl_hdr(skb
, conn
->handle
, flags
);
4657 hci_add_acl_hdr(skb
, chan
->handle
, flags
);
4660 BT_ERR("%s unknown dev_type %d", hdev
->name
, hdev
->dev_type
);
4664 list
= skb_shinfo(skb
)->frag_list
;
4666 /* Non fragmented */
4667 BT_DBG("%s nonfrag skb %p len %d", hdev
->name
, skb
, skb
->len
);
4669 skb_queue_tail(queue
, skb
);
4672 BT_DBG("%s frag %p len %d", hdev
->name
, skb
, skb
->len
);
4674 skb_shinfo(skb
)->frag_list
= NULL
;
4676 /* Queue all fragments atomically. We need to use spin_lock_bh
4677 * here because of 6LoWPAN links, as there this function is
4678 * called from softirq and using normal spin lock could cause
4681 spin_lock_bh(&queue
->lock
);
4683 __skb_queue_tail(queue
, skb
);
4685 flags
&= ~ACL_START
;
4688 skb
= list
; list
= list
->next
;
4690 bt_cb(skb
)->pkt_type
= HCI_ACLDATA_PKT
;
4691 hci_add_acl_hdr(skb
, conn
->handle
, flags
);
4693 BT_DBG("%s frag %p len %d", hdev
->name
, skb
, skb
->len
);
4695 __skb_queue_tail(queue
, skb
);
4698 spin_unlock_bh(&queue
->lock
);
4702 void hci_send_acl(struct hci_chan
*chan
, struct sk_buff
*skb
, __u16 flags
)
4704 struct hci_dev
*hdev
= chan
->conn
->hdev
;
4706 BT_DBG("%s chan %p flags 0x%4.4x", hdev
->name
, chan
, flags
);
4708 hci_queue_acl(chan
, &chan
->data_q
, skb
, flags
);
4710 queue_work(hdev
->workqueue
, &hdev
->tx_work
);
4714 void hci_send_sco(struct hci_conn
*conn
, struct sk_buff
*skb
)
4716 struct hci_dev
*hdev
= conn
->hdev
;
4717 struct hci_sco_hdr hdr
;
4719 BT_DBG("%s len %d", hdev
->name
, skb
->len
);
4721 hdr
.handle
= cpu_to_le16(conn
->handle
);
4722 hdr
.dlen
= skb
->len
;
4724 skb_push(skb
, HCI_SCO_HDR_SIZE
);
4725 skb_reset_transport_header(skb
);
4726 memcpy(skb_transport_header(skb
), &hdr
, HCI_SCO_HDR_SIZE
);
4728 bt_cb(skb
)->pkt_type
= HCI_SCODATA_PKT
;
4730 skb_queue_tail(&conn
->data_q
, skb
);
4731 queue_work(hdev
->workqueue
, &hdev
->tx_work
);
4734 /* ---- HCI TX task (outgoing data) ---- */
4736 /* HCI Connection scheduler */
4737 static struct hci_conn
*hci_low_sent(struct hci_dev
*hdev
, __u8 type
,
4740 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
4741 struct hci_conn
*conn
= NULL
, *c
;
4742 unsigned int num
= 0, min
= ~0;
4744 /* We don't have to lock device here. Connections are always
4745 * added and removed with TX task disabled. */
4749 list_for_each_entry_rcu(c
, &h
->list
, list
) {
4750 if (c
->type
!= type
|| skb_queue_empty(&c
->data_q
))
4753 if (c
->state
!= BT_CONNECTED
&& c
->state
!= BT_CONFIG
)
4758 if (c
->sent
< min
) {
4763 if (hci_conn_num(hdev
, type
) == num
)
4772 switch (conn
->type
) {
4774 cnt
= hdev
->acl_cnt
;
4778 cnt
= hdev
->sco_cnt
;
4781 cnt
= hdev
->le_mtu
? hdev
->le_cnt
: hdev
->acl_cnt
;
4785 BT_ERR("Unknown link type");
4793 BT_DBG("conn %p quote %d", conn
, *quote
);
4797 static void hci_link_tx_to(struct hci_dev
*hdev
, __u8 type
)
4799 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
4802 BT_ERR("%s link tx timeout", hdev
->name
);
4806 /* Kill stalled connections */
4807 list_for_each_entry_rcu(c
, &h
->list
, list
) {
4808 if (c
->type
== type
&& c
->sent
) {
4809 BT_ERR("%s killing stalled connection %pMR",
4810 hdev
->name
, &c
->dst
);
4811 hci_disconnect(c
, HCI_ERROR_REMOTE_USER_TERM
);
4818 static struct hci_chan
*hci_chan_sent(struct hci_dev
*hdev
, __u8 type
,
4821 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
4822 struct hci_chan
*chan
= NULL
;
4823 unsigned int num
= 0, min
= ~0, cur_prio
= 0;
4824 struct hci_conn
*conn
;
4825 int cnt
, q
, conn_num
= 0;
4827 BT_DBG("%s", hdev
->name
);
4831 list_for_each_entry_rcu(conn
, &h
->list
, list
) {
4832 struct hci_chan
*tmp
;
4834 if (conn
->type
!= type
)
4837 if (conn
->state
!= BT_CONNECTED
&& conn
->state
!= BT_CONFIG
)
4842 list_for_each_entry_rcu(tmp
, &conn
->chan_list
, list
) {
4843 struct sk_buff
*skb
;
4845 if (skb_queue_empty(&tmp
->data_q
))
4848 skb
= skb_peek(&tmp
->data_q
);
4849 if (skb
->priority
< cur_prio
)
4852 if (skb
->priority
> cur_prio
) {
4855 cur_prio
= skb
->priority
;
4860 if (conn
->sent
< min
) {
4866 if (hci_conn_num(hdev
, type
) == conn_num
)
4875 switch (chan
->conn
->type
) {
4877 cnt
= hdev
->acl_cnt
;
4880 cnt
= hdev
->block_cnt
;
4884 cnt
= hdev
->sco_cnt
;
4887 cnt
= hdev
->le_mtu
? hdev
->le_cnt
: hdev
->acl_cnt
;
4891 BT_ERR("Unknown link type");
4896 BT_DBG("chan %p quote %d", chan
, *quote
);
4900 static void hci_prio_recalculate(struct hci_dev
*hdev
, __u8 type
)
4902 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
4903 struct hci_conn
*conn
;
4906 BT_DBG("%s", hdev
->name
);
4910 list_for_each_entry_rcu(conn
, &h
->list
, list
) {
4911 struct hci_chan
*chan
;
4913 if (conn
->type
!= type
)
4916 if (conn
->state
!= BT_CONNECTED
&& conn
->state
!= BT_CONFIG
)
4921 list_for_each_entry_rcu(chan
, &conn
->chan_list
, list
) {
4922 struct sk_buff
*skb
;
4929 if (skb_queue_empty(&chan
->data_q
))
4932 skb
= skb_peek(&chan
->data_q
);
4933 if (skb
->priority
>= HCI_PRIO_MAX
- 1)
4936 skb
->priority
= HCI_PRIO_MAX
- 1;
4938 BT_DBG("chan %p skb %p promoted to %d", chan
, skb
,
4942 if (hci_conn_num(hdev
, type
) == num
)
4950 static inline int __get_blocks(struct hci_dev
*hdev
, struct sk_buff
*skb
)
4952 /* Calculate count of blocks used by this packet */
4953 return DIV_ROUND_UP(skb
->len
- HCI_ACL_HDR_SIZE
, hdev
->block_len
);
4956 static void __check_timeout(struct hci_dev
*hdev
, unsigned int cnt
)
4958 if (!test_bit(HCI_UNCONFIGURED
, &hdev
->dev_flags
)) {
4959 /* ACL tx timeout must be longer than maximum
4960 * link supervision timeout (40.9 seconds) */
4961 if (!cnt
&& time_after(jiffies
, hdev
->acl_last_tx
+
4962 HCI_ACL_TX_TIMEOUT
))
4963 hci_link_tx_to(hdev
, ACL_LINK
);
4967 static void hci_sched_acl_pkt(struct hci_dev
*hdev
)
4969 unsigned int cnt
= hdev
->acl_cnt
;
4970 struct hci_chan
*chan
;
4971 struct sk_buff
*skb
;
4974 __check_timeout(hdev
, cnt
);
4976 while (hdev
->acl_cnt
&&
4977 (chan
= hci_chan_sent(hdev
, ACL_LINK
, "e
))) {
4978 u32 priority
= (skb_peek(&chan
->data_q
))->priority
;
4979 while (quote
-- && (skb
= skb_peek(&chan
->data_q
))) {
4980 BT_DBG("chan %p skb %p len %d priority %u", chan
, skb
,
4981 skb
->len
, skb
->priority
);
4983 /* Stop if priority has changed */
4984 if (skb
->priority
< priority
)
4987 skb
= skb_dequeue(&chan
->data_q
);
4989 hci_conn_enter_active_mode(chan
->conn
,
4990 bt_cb(skb
)->force_active
);
4992 hci_send_frame(hdev
, skb
);
4993 hdev
->acl_last_tx
= jiffies
;
5001 if (cnt
!= hdev
->acl_cnt
)
5002 hci_prio_recalculate(hdev
, ACL_LINK
);
5005 static void hci_sched_acl_blk(struct hci_dev
*hdev
)
5007 unsigned int cnt
= hdev
->block_cnt
;
5008 struct hci_chan
*chan
;
5009 struct sk_buff
*skb
;
5013 __check_timeout(hdev
, cnt
);
5015 BT_DBG("%s", hdev
->name
);
5017 if (hdev
->dev_type
== HCI_AMP
)
5022 while (hdev
->block_cnt
> 0 &&
5023 (chan
= hci_chan_sent(hdev
, type
, "e
))) {
5024 u32 priority
= (skb_peek(&chan
->data_q
))->priority
;
5025 while (quote
> 0 && (skb
= skb_peek(&chan
->data_q
))) {
5028 BT_DBG("chan %p skb %p len %d priority %u", chan
, skb
,
5029 skb
->len
, skb
->priority
);
5031 /* Stop if priority has changed */
5032 if (skb
->priority
< priority
)
5035 skb
= skb_dequeue(&chan
->data_q
);
5037 blocks
= __get_blocks(hdev
, skb
);
5038 if (blocks
> hdev
->block_cnt
)
5041 hci_conn_enter_active_mode(chan
->conn
,
5042 bt_cb(skb
)->force_active
);
5044 hci_send_frame(hdev
, skb
);
5045 hdev
->acl_last_tx
= jiffies
;
5047 hdev
->block_cnt
-= blocks
;
5050 chan
->sent
+= blocks
;
5051 chan
->conn
->sent
+= blocks
;
5055 if (cnt
!= hdev
->block_cnt
)
5056 hci_prio_recalculate(hdev
, type
);
5059 static void hci_sched_acl(struct hci_dev
*hdev
)
5061 BT_DBG("%s", hdev
->name
);
5063 /* No ACL link over BR/EDR controller */
5064 if (!hci_conn_num(hdev
, ACL_LINK
) && hdev
->dev_type
== HCI_BREDR
)
5067 /* No AMP link over AMP controller */
5068 if (!hci_conn_num(hdev
, AMP_LINK
) && hdev
->dev_type
== HCI_AMP
)
5071 switch (hdev
->flow_ctl_mode
) {
5072 case HCI_FLOW_CTL_MODE_PACKET_BASED
:
5073 hci_sched_acl_pkt(hdev
);
5076 case HCI_FLOW_CTL_MODE_BLOCK_BASED
:
5077 hci_sched_acl_blk(hdev
);
5083 static void hci_sched_sco(struct hci_dev
*hdev
)
5085 struct hci_conn
*conn
;
5086 struct sk_buff
*skb
;
5089 BT_DBG("%s", hdev
->name
);
5091 if (!hci_conn_num(hdev
, SCO_LINK
))
5094 while (hdev
->sco_cnt
&& (conn
= hci_low_sent(hdev
, SCO_LINK
, "e
))) {
5095 while (quote
-- && (skb
= skb_dequeue(&conn
->data_q
))) {
5096 BT_DBG("skb %p len %d", skb
, skb
->len
);
5097 hci_send_frame(hdev
, skb
);
5100 if (conn
->sent
== ~0)
5106 static void hci_sched_esco(struct hci_dev
*hdev
)
5108 struct hci_conn
*conn
;
5109 struct sk_buff
*skb
;
5112 BT_DBG("%s", hdev
->name
);
5114 if (!hci_conn_num(hdev
, ESCO_LINK
))
5117 while (hdev
->sco_cnt
&& (conn
= hci_low_sent(hdev
, ESCO_LINK
,
5119 while (quote
-- && (skb
= skb_dequeue(&conn
->data_q
))) {
5120 BT_DBG("skb %p len %d", skb
, skb
->len
);
5121 hci_send_frame(hdev
, skb
);
5124 if (conn
->sent
== ~0)
5130 static void hci_sched_le(struct hci_dev
*hdev
)
5132 struct hci_chan
*chan
;
5133 struct sk_buff
*skb
;
5134 int quote
, cnt
, tmp
;
5136 BT_DBG("%s", hdev
->name
);
5138 if (!hci_conn_num(hdev
, LE_LINK
))
5141 if (!test_bit(HCI_UNCONFIGURED
, &hdev
->dev_flags
)) {
5142 /* LE tx timeout must be longer than maximum
5143 * link supervision timeout (40.9 seconds) */
5144 if (!hdev
->le_cnt
&& hdev
->le_pkts
&&
5145 time_after(jiffies
, hdev
->le_last_tx
+ HZ
* 45))
5146 hci_link_tx_to(hdev
, LE_LINK
);
5149 cnt
= hdev
->le_pkts
? hdev
->le_cnt
: hdev
->acl_cnt
;
5151 while (cnt
&& (chan
= hci_chan_sent(hdev
, LE_LINK
, "e
))) {
5152 u32 priority
= (skb_peek(&chan
->data_q
))->priority
;
5153 while (quote
-- && (skb
= skb_peek(&chan
->data_q
))) {
5154 BT_DBG("chan %p skb %p len %d priority %u", chan
, skb
,
5155 skb
->len
, skb
->priority
);
5157 /* Stop if priority has changed */
5158 if (skb
->priority
< priority
)
5161 skb
= skb_dequeue(&chan
->data_q
);
5163 hci_send_frame(hdev
, skb
);
5164 hdev
->le_last_tx
= jiffies
;
5175 hdev
->acl_cnt
= cnt
;
5178 hci_prio_recalculate(hdev
, LE_LINK
);
5181 static void hci_tx_work(struct work_struct
*work
)
5183 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, tx_work
);
5184 struct sk_buff
*skb
;
5186 BT_DBG("%s acl %d sco %d le %d", hdev
->name
, hdev
->acl_cnt
,
5187 hdev
->sco_cnt
, hdev
->le_cnt
);
5189 if (!test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
)) {
5190 /* Schedule queues and send stuff to HCI driver */
5191 hci_sched_acl(hdev
);
5192 hci_sched_sco(hdev
);
5193 hci_sched_esco(hdev
);
5197 /* Send next queued raw (unknown type) packet */
5198 while ((skb
= skb_dequeue(&hdev
->raw_q
)))
5199 hci_send_frame(hdev
, skb
);
5202 /* ----- HCI RX task (incoming data processing) ----- */
5204 /* ACL data packet */
5205 static void hci_acldata_packet(struct hci_dev
*hdev
, struct sk_buff
*skb
)
5207 struct hci_acl_hdr
*hdr
= (void *) skb
->data
;
5208 struct hci_conn
*conn
;
5209 __u16 handle
, flags
;
5211 skb_pull(skb
, HCI_ACL_HDR_SIZE
);
5213 handle
= __le16_to_cpu(hdr
->handle
);
5214 flags
= hci_flags(handle
);
5215 handle
= hci_handle(handle
);
5217 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev
->name
, skb
->len
,
5220 hdev
->stat
.acl_rx
++;
5223 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
5224 hci_dev_unlock(hdev
);
5227 hci_conn_enter_active_mode(conn
, BT_POWER_FORCE_ACTIVE_OFF
);
5229 /* Send to upper protocol */
5230 l2cap_recv_acldata(conn
, skb
, flags
);
5233 BT_ERR("%s ACL packet for unknown connection handle %d",
5234 hdev
->name
, handle
);
5240 /* SCO data packet */
5241 static void hci_scodata_packet(struct hci_dev
*hdev
, struct sk_buff
*skb
)
5243 struct hci_sco_hdr
*hdr
= (void *) skb
->data
;
5244 struct hci_conn
*conn
;
5247 skb_pull(skb
, HCI_SCO_HDR_SIZE
);
5249 handle
= __le16_to_cpu(hdr
->handle
);
5251 BT_DBG("%s len %d handle 0x%4.4x", hdev
->name
, skb
->len
, handle
);
5253 hdev
->stat
.sco_rx
++;
5256 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
5257 hci_dev_unlock(hdev
);
5260 /* Send to upper protocol */
5261 sco_recv_scodata(conn
, skb
);
5264 BT_ERR("%s SCO packet for unknown connection handle %d",
5265 hdev
->name
, handle
);
5271 static bool hci_req_is_complete(struct hci_dev
*hdev
)
5273 struct sk_buff
*skb
;
5275 skb
= skb_peek(&hdev
->cmd_q
);
5279 return bt_cb(skb
)->req
.start
;
5282 static void hci_resend_last(struct hci_dev
*hdev
)
5284 struct hci_command_hdr
*sent
;
5285 struct sk_buff
*skb
;
5288 if (!hdev
->sent_cmd
)
5291 sent
= (void *) hdev
->sent_cmd
->data
;
5292 opcode
= __le16_to_cpu(sent
->opcode
);
5293 if (opcode
== HCI_OP_RESET
)
5296 skb
= skb_clone(hdev
->sent_cmd
, GFP_KERNEL
);
5300 skb_queue_head(&hdev
->cmd_q
, skb
);
5301 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
5304 void hci_req_cmd_complete(struct hci_dev
*hdev
, u16 opcode
, u8 status
)
5306 hci_req_complete_t req_complete
= NULL
;
5307 struct sk_buff
*skb
;
5308 unsigned long flags
;
5310 BT_DBG("opcode 0x%04x status 0x%02x", opcode
, status
);
5312 /* If the completed command doesn't match the last one that was
5313 * sent we need to do special handling of it.
5315 if (!hci_sent_cmd_data(hdev
, opcode
)) {
5316 /* Some CSR based controllers generate a spontaneous
5317 * reset complete event during init and any pending
5318 * command will never be completed. In such a case we
5319 * need to resend whatever was the last sent
5322 if (test_bit(HCI_INIT
, &hdev
->flags
) && opcode
== HCI_OP_RESET
)
5323 hci_resend_last(hdev
);
5328 /* If the command succeeded and there's still more commands in
5329 * this request the request is not yet complete.
5331 if (!status
&& !hci_req_is_complete(hdev
))
5334 /* If this was the last command in a request the complete
5335 * callback would be found in hdev->sent_cmd instead of the
5336 * command queue (hdev->cmd_q).
5338 if (hdev
->sent_cmd
) {
5339 req_complete
= bt_cb(hdev
->sent_cmd
)->req
.complete
;
5342 /* We must set the complete callback to NULL to
5343 * avoid calling the callback more than once if
5344 * this function gets called again.
5346 bt_cb(hdev
->sent_cmd
)->req
.complete
= NULL
;
5352 /* Remove all pending commands belonging to this request */
5353 spin_lock_irqsave(&hdev
->cmd_q
.lock
, flags
);
5354 while ((skb
= __skb_dequeue(&hdev
->cmd_q
))) {
5355 if (bt_cb(skb
)->req
.start
) {
5356 __skb_queue_head(&hdev
->cmd_q
, skb
);
5360 req_complete
= bt_cb(skb
)->req
.complete
;
5363 spin_unlock_irqrestore(&hdev
->cmd_q
.lock
, flags
);
5367 req_complete(hdev
, status
);
5370 static void hci_rx_work(struct work_struct
*work
)
5372 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, rx_work
);
5373 struct sk_buff
*skb
;
5375 BT_DBG("%s", hdev
->name
);
5377 while ((skb
= skb_dequeue(&hdev
->rx_q
))) {
5378 /* Send copy to monitor */
5379 hci_send_to_monitor(hdev
, skb
);
5381 if (atomic_read(&hdev
->promisc
)) {
5382 /* Send copy to the sockets */
5383 hci_send_to_sock(hdev
, skb
);
5386 if (test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
)) {
5391 if (test_bit(HCI_INIT
, &hdev
->flags
)) {
5392 /* Don't process data packets in this states. */
5393 switch (bt_cb(skb
)->pkt_type
) {
5394 case HCI_ACLDATA_PKT
:
5395 case HCI_SCODATA_PKT
:
5402 switch (bt_cb(skb
)->pkt_type
) {
5404 BT_DBG("%s Event packet", hdev
->name
);
5405 hci_event_packet(hdev
, skb
);
5408 case HCI_ACLDATA_PKT
:
5409 BT_DBG("%s ACL data packet", hdev
->name
);
5410 hci_acldata_packet(hdev
, skb
);
5413 case HCI_SCODATA_PKT
:
5414 BT_DBG("%s SCO data packet", hdev
->name
);
5415 hci_scodata_packet(hdev
, skb
);
5425 static void hci_cmd_work(struct work_struct
*work
)
5427 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, cmd_work
);
5428 struct sk_buff
*skb
;
5430 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev
->name
,
5431 atomic_read(&hdev
->cmd_cnt
), skb_queue_len(&hdev
->cmd_q
));
5433 /* Send queued commands */
5434 if (atomic_read(&hdev
->cmd_cnt
)) {
5435 skb
= skb_dequeue(&hdev
->cmd_q
);
5439 kfree_skb(hdev
->sent_cmd
);
5441 hdev
->sent_cmd
= skb_clone(skb
, GFP_KERNEL
);
5442 if (hdev
->sent_cmd
) {
5443 atomic_dec(&hdev
->cmd_cnt
);
5444 hci_send_frame(hdev
, skb
);
5445 if (test_bit(HCI_RESET
, &hdev
->flags
))
5446 cancel_delayed_work(&hdev
->cmd_timer
);
5448 schedule_delayed_work(&hdev
->cmd_timer
,
5451 skb_queue_head(&hdev
->cmd_q
, skb
);
5452 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
5457 void hci_req_add_le_scan_disable(struct hci_request
*req
)
5459 struct hci_cp_le_set_scan_enable cp
;
5461 memset(&cp
, 0, sizeof(cp
));
5462 cp
.enable
= LE_SCAN_DISABLE
;
5463 hci_req_add(req
, HCI_OP_LE_SET_SCAN_ENABLE
, sizeof(cp
), &cp
);
5466 static void add_to_white_list(struct hci_request
*req
,
5467 struct hci_conn_params
*params
)
5469 struct hci_cp_le_add_to_white_list cp
;
5471 cp
.bdaddr_type
= params
->addr_type
;
5472 bacpy(&cp
.bdaddr
, ¶ms
->addr
);
5474 hci_req_add(req
, HCI_OP_LE_ADD_TO_WHITE_LIST
, sizeof(cp
), &cp
);
5477 static u8
update_white_list(struct hci_request
*req
)
5479 struct hci_dev
*hdev
= req
->hdev
;
5480 struct hci_conn_params
*params
;
5481 struct bdaddr_list
*b
;
5482 uint8_t white_list_entries
= 0;
5484 /* Go through the current white list programmed into the
5485 * controller one by one and check if that address is still
5486 * in the list of pending connections or list of devices to
5487 * report. If not present in either list, then queue the
5488 * command to remove it from the controller.
5490 list_for_each_entry(b
, &hdev
->le_white_list
, list
) {
5491 struct hci_cp_le_del_from_white_list cp
;
5493 if (hci_pend_le_action_lookup(&hdev
->pend_le_conns
,
5494 &b
->bdaddr
, b
->bdaddr_type
) ||
5495 hci_pend_le_action_lookup(&hdev
->pend_le_reports
,
5496 &b
->bdaddr
, b
->bdaddr_type
)) {
5497 white_list_entries
++;
5501 cp
.bdaddr_type
= b
->bdaddr_type
;
5502 bacpy(&cp
.bdaddr
, &b
->bdaddr
);
5504 hci_req_add(req
, HCI_OP_LE_DEL_FROM_WHITE_LIST
,
5508 /* Since all no longer valid white list entries have been
5509 * removed, walk through the list of pending connections
5510 * and ensure that any new device gets programmed into
5513 * If the list of the devices is larger than the list of
5514 * available white list entries in the controller, then
5515 * just abort and return filer policy value to not use the
5518 list_for_each_entry(params
, &hdev
->pend_le_conns
, action
) {
5519 if (hci_bdaddr_list_lookup(&hdev
->le_white_list
,
5520 ¶ms
->addr
, params
->addr_type
))
5523 if (white_list_entries
>= hdev
->le_white_list_size
) {
5524 /* Select filter policy to accept all advertising */
5528 if (hci_find_irk_by_addr(hdev
, ¶ms
->addr
,
5529 params
->addr_type
)) {
5530 /* White list can not be used with RPAs */
5534 white_list_entries
++;
5535 add_to_white_list(req
, params
);
5538 /* After adding all new pending connections, walk through
5539 * the list of pending reports and also add these to the
5540 * white list if there is still space.
5542 list_for_each_entry(params
, &hdev
->pend_le_reports
, action
) {
5543 if (hci_bdaddr_list_lookup(&hdev
->le_white_list
,
5544 ¶ms
->addr
, params
->addr_type
))
5547 if (white_list_entries
>= hdev
->le_white_list_size
) {
5548 /* Select filter policy to accept all advertising */
5552 if (hci_find_irk_by_addr(hdev
, ¶ms
->addr
,
5553 params
->addr_type
)) {
5554 /* White list can not be used with RPAs */
5558 white_list_entries
++;
5559 add_to_white_list(req
, params
);
5562 /* Select filter policy to use white list */
5566 void hci_req_add_le_passive_scan(struct hci_request
*req
)
5568 struct hci_cp_le_set_scan_param param_cp
;
5569 struct hci_cp_le_set_scan_enable enable_cp
;
5570 struct hci_dev
*hdev
= req
->hdev
;
5574 /* Set require_privacy to false since no SCAN_REQ are send
5575 * during passive scanning. Not using an unresolvable address
5576 * here is important so that peer devices using direct
5577 * advertising with our address will be correctly reported
5578 * by the controller.
5580 if (hci_update_random_address(req
, false, &own_addr_type
))
5583 /* Adding or removing entries from the white list must
5584 * happen before enabling scanning. The controller does
5585 * not allow white list modification while scanning.
5587 filter_policy
= update_white_list(req
);
5589 memset(¶m_cp
, 0, sizeof(param_cp
));
5590 param_cp
.type
= LE_SCAN_PASSIVE
;
5591 param_cp
.interval
= cpu_to_le16(hdev
->le_scan_interval
);
5592 param_cp
.window
= cpu_to_le16(hdev
->le_scan_window
);
5593 param_cp
.own_address_type
= own_addr_type
;
5594 param_cp
.filter_policy
= filter_policy
;
5595 hci_req_add(req
, HCI_OP_LE_SET_SCAN_PARAM
, sizeof(param_cp
),
5598 memset(&enable_cp
, 0, sizeof(enable_cp
));
5599 enable_cp
.enable
= LE_SCAN_ENABLE
;
5600 enable_cp
.filter_dup
= LE_SCAN_FILTER_DUP_ENABLE
;
5601 hci_req_add(req
, HCI_OP_LE_SET_SCAN_ENABLE
, sizeof(enable_cp
),
5605 static void update_background_scan_complete(struct hci_dev
*hdev
, u8 status
)
5608 BT_DBG("HCI request failed to update background scanning: "
5609 "status 0x%2.2x", status
);
5612 /* This function controls the background scanning based on hdev->pend_le_conns
5613 * list. If there are pending LE connection we start the background scanning,
5614 * otherwise we stop it.
5616 * This function requires the caller holds hdev->lock.
5618 void hci_update_background_scan(struct hci_dev
*hdev
)
5620 struct hci_request req
;
5621 struct hci_conn
*conn
;
5624 if (!test_bit(HCI_UP
, &hdev
->flags
) ||
5625 test_bit(HCI_INIT
, &hdev
->flags
) ||
5626 test_bit(HCI_SETUP
, &hdev
->dev_flags
) ||
5627 test_bit(HCI_CONFIG
, &hdev
->dev_flags
) ||
5628 test_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
) ||
5629 test_bit(HCI_UNREGISTER
, &hdev
->dev_flags
))
5632 /* No point in doing scanning if LE support hasn't been enabled */
5633 if (!test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
))
5636 /* If discovery is active don't interfere with it */
5637 if (hdev
->discovery
.state
!= DISCOVERY_STOPPED
)
5640 hci_req_init(&req
, hdev
);
5642 if (list_empty(&hdev
->pend_le_conns
) &&
5643 list_empty(&hdev
->pend_le_reports
)) {
5644 /* If there is no pending LE connections or devices
5645 * to be scanned for, we should stop the background
5649 /* If controller is not scanning we are done. */
5650 if (!test_bit(HCI_LE_SCAN
, &hdev
->dev_flags
))
5653 hci_req_add_le_scan_disable(&req
);
5655 BT_DBG("%s stopping background scanning", hdev
->name
);
5657 /* If there is at least one pending LE connection, we should
5658 * keep the background scan running.
5661 /* If controller is connecting, we should not start scanning
5662 * since some controllers are not able to scan and connect at
5665 conn
= hci_conn_hash_lookup_state(hdev
, LE_LINK
, BT_CONNECT
);
5669 /* If controller is currently scanning, we stop it to ensure we
5670 * don't miss any advertising (due to duplicates filter).
5672 if (test_bit(HCI_LE_SCAN
, &hdev
->dev_flags
))
5673 hci_req_add_le_scan_disable(&req
);
5675 hci_req_add_le_passive_scan(&req
);
5677 BT_DBG("%s starting background scanning", hdev
->name
);
5680 err
= hci_req_run(&req
, update_background_scan_complete
);
5682 BT_ERR("Failed to run HCI request: err %d", err
);
5685 static bool disconnected_whitelist_entries(struct hci_dev
*hdev
)
5687 struct bdaddr_list
*b
;
5689 list_for_each_entry(b
, &hdev
->whitelist
, list
) {
5690 struct hci_conn
*conn
;
5692 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
, &b
->bdaddr
);
5696 if (conn
->state
!= BT_CONNECTED
&& conn
->state
!= BT_CONFIG
)
5703 void hci_update_page_scan(struct hci_dev
*hdev
, struct hci_request
*req
)
5707 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
5710 if (!hdev_is_powered(hdev
))
5713 if (mgmt_powering_down(hdev
))
5716 if (test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
) ||
5717 disconnected_whitelist_entries(hdev
))
5720 scan
= SCAN_DISABLED
;
5722 if (test_bit(HCI_PSCAN
, &hdev
->flags
) == !!(scan
& SCAN_PAGE
))
5725 if (test_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
))
5726 scan
|= SCAN_INQUIRY
;
5729 hci_req_add(req
, HCI_OP_WRITE_SCAN_ENABLE
, 1, &scan
);
5731 hci_send_cmd(hdev
, HCI_OP_WRITE_SCAN_ENABLE
, 1, &scan
);