/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
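/* Note: hci_index_ida hands out the numeric part of the hciX interface
 * names; each registered controller is assigned the lowest free index.
 */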
/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;

	buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';

	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;

	size_t buf_size = min(count, (sizeof(buf)-1));

	if (!test_bit(HCI_UP, &hdev->flags))

	if (copy_from_user(buf, user_buf, buf_size))

	if (strtobool(buf, &enable))

	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))

	hci_req_sync_lock(hdev);

	skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,

	hci_req_sync_unlock(hdev);

	hci_dev_change_flag(hdev, HCI_DUT_MODE);
}

static const struct file_operations dut_mode_fops = {
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
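/* The dut_mode and vendor_diag attributes created further below are simple
 * boolean debugfs files; assuming debugfs is mounted in its usual location,
 * they would typically be toggled from userspace with, for example:
 *
 *	echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * (path shown for illustration only; the exact mount point may differ).
 */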
static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;

	buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';

	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;

	size_t buf_size = min(count, (sizeof(buf)-1));

	if (copy_from_user(buf, user_buf, buf_size))

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))

	/* When the diagnostic flags are not persistent and the transport
	 * is not active, then there is no need for the vendor callback.
	 *
	 * Instead just store the desired value. If needed the setting
	 * will be programmed when the controller gets powered on.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    !test_bit(HCI_RUNNING, &hdev->flags))

	hci_req_sync_lock(hdev);
	err = hdev->set_diag(hdev, enable);
	hci_req_sync_unlock(hdev);

	hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);

	hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);
}

static const struct file_operations vendor_diag_fops = {
	.read		= vendor_diag_read,
	.write		= vendor_diag_write,
	.llseek		= default_llseek,
};

static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
	debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
			    &dut_mode_fops);

	debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
			    &vendor_diag_fops);
}
static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static int amp_init2(struct hci_request *req)
{
	/* Read Local Supported Features. Not all AMP controllers
	 * support this so it's placed conditionally in the second
	 * stage of init.
	 */
	if (req->hdev->commands[14] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
}
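/* Note on the hdev->commands[] checks used throughout these init requests:
 * each array entry holds one octet of the Supported Commands bitmask
 * returned by HCI Read Local Supported Commands, so a test such as
 * "commands[14] & 0x20" simply checks bit 5 of octet 14 before issuing the
 * corresponding optional command.
 */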
static int hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * INQUIRY command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */

		/* If the controller supports the Disconnect command, enable
		 * the corresponding event. In addition enable packet flow
		 * control related events.
		 */
		if (hdev->commands[0] & 0x20) {
			events[0] |= 0x10; /* Disconnection Complete */
			events[2] |= 0x04; /* Number of Completed Packets */
			events[3] |= 0x02; /* Data Buffer Overflow */
		}

		/* If the controller supports the Read Remote Version
		 * Information command, enable the corresponding event.
		 */
		if (hdev->commands[2] & 0x80)
			events[1] |= 0x08; /* Read Remote Version
					    * Information Complete
					    */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_ext_feat_capable(hdev))
		events[4] |= 0x04; /* Read Remote Extended Features Complete */

	if (lmp_esco_capable(hdev)) {
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
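/* For reference: events[] above is the 64-bit event mask for the Set Event
 * Mask command, one octet per array entry, so "events[n] |= (1 << b)"
 * enables the event that the Bluetooth core specification assigns to bit b
 * of octet n.
 */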
static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->dev_type == HCI_AMP)
		return amp_init2(req);

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
	}

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
	}
}
static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
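/* The page-2 mask built above is only transmitted when the controller
 * advertises support for the Set Event Mask Page 2 command; that check
 * (hdev->commands[22] & 0x04) lives in hci_init4_req() below.
 */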
static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_setup_event_mask(req);

	if (hdev->commands[6] & 0x20 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {

		memset(events, 0, sizeof(events));

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40;	/* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04;	/* LE Direct Advertising
						 * Report
						 */

		/* If the controller supports the LE Set Scan Enable command,
		 * enable the corresponding advertising report event.
		 */
		if (hdev->commands[26] & 0x08)
			events[0] |= 0x02;	/* LE Advertising Report */

		/* If the controller supports the LE Create Connection
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[26] & 0x10)
			events[0] |= 0x01;	/* LE Connection Complete */

		/* If the controller supports the LE Connection Update
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x04)
			events[0] |= 0x04;	/* LE Connection Update
						 * Complete
						 */

		/* If the controller supports the LE Read Remote Used Features
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x20)
			events[0] |= 0x08;	/* LE Read Remote Used
						 * Features Complete
						 */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80;	/* LE Read Local P-256
						 * Public Key Complete
						 */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01;	/* LE Generate DHKey Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->commands[26] & 0x40) {
			/* Read LE White List Size */
			hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
		}

		if (hdev->commands[26] & 0x80) {
			/* Clear LE White List */
			hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
	}
}
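/* A note on the loop above: extended feature pages are numbered from 0;
 * page 0 and page 1 are already requested earlier in the init sequence, so
 * only pages 2 up to hdev->max_page (bounded by HCI_MAX_PAGES) are fetched
 * here.
 */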
static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
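/* __hci_init() below runs the request stages defined above in order:
 * hci_init1_req resets and identifies the controller, hci_init2_req
 * configures the transport-level basics, hci_init3_req programs the event
 * masks and LE parameters, and hci_init4_req handles the optional
 * late-stage commands.
 */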
static int __hci_init(struct hci_dev *hdev)
{
	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first two stages of init.
	 */
	if (hdev->dev_type != HCI_BREDR)

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);
}
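/* hci_init0_req below is the minimal bring-up used for controllers that
 * start in the unconfigured state (see __hci_unconf_init): it only resets
 * the controller and reads the version information and, when the driver
 * can change it, the public BD address.
 */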
static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);
}
static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %x", req->hdev->name, auth);

	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %x", req->hdev->name, encrypt);

	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
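/* The four request helpers above back the legacy HCISETSCAN, HCISETAUTH,
 * HCISETENCRYPT and HCISETLINKPOL ioctls handled in hci_dev_cmd(); each one
 * simply queues a single HCI command carrying the ioctl argument.
 */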
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
}
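/* Callers of hci_dev_get() own a reference on success and are expected to
 * release it with hci_dev_put() once they are done with the device.
 */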
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
	}
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
	}
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
		if (!bacmp(&e->data.bdaddr, bdaddr))
	}
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
	}

	list_add(&ie->list, pos);
}
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);

	if (!ie->data.ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	if (ie->name_state == NAME_NEEDED &&
	    data->rssi != ie->data.rssi) {
		ie->data.rssi = data->rssi;
		hci_inquiry_cache_update_resolve(hdev, ie);
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);

	flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

	list_add(&ie->all, &cache->all);

	ie->name_state = NAME_KNOWN;

	ie->name_state = NAME_NOT_KNOWN;
	list_add(&ie->list, &cache->unknown);

	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
}
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))

	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
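/* hci_inq_req() packs the hci_inquiry_req that arrived via the HCIINQUIRY
 * ioctl (LAP, inquiry length and response limit) into a single HCI Inquiry
 * command; hci_inquiry() below drives it synchronously.
 */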
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;

	if (copy_from_user(&ir, ptr, sizeof(ir)))

	hdev = hci_dev_get(ir.dev_id);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {

	if (hdev->dev_type != HCI_BREDR) {

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {

	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,

	/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
	 * cleared). If it is interrupted by a signal, return -EINTR.
	 */
	if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
			TASK_INTERRUPTIBLE))

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);

	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
	}
}
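/* For illustration only: a minimal user-space sketch of how this ioctl is
 * typically driven (field names from the hci_inquiry_req definition in the
 * UAPI headers; error handling omitted, details may vary):
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[8];
 *	} req = { .ir = { .dev_id = 0, .length = 8, .num_rsp = 8,
 *			  .lap = { 0x33, 0x8b, 0x9e } } };
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	ioctl(dd, HCIINQUIRY, &req);
 */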
static int hci_dev_do_open(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {

	if (hdev->open(hdev)) {

	set_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_OPEN);

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
		hci_sock_dev_event(hdev, HCI_DEV_SETUP);

		ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			ret = __hci_unconf_init(hdev);
	}

	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		ret = __hci_init(hdev);
		if (!ret && hdev->post_init)
			ret = hdev->post_init(hdev);
	}

	/* If the HCI Reset command is clearing all diagnostic settings,
	 * then they need to be reprogrammed after the init procedure
	 * completed.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
		ret = hdev->set_diag(hdev, true);

	clear_bit(HCI_INIT, &hdev->flags);

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
	set_bit(HCI_UP, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_UP);
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
	    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    hci_dev_test_flag(hdev, HCI_MGMT) &&
	    hdev->dev_type == HCI_BREDR) {
		ret = __hci_req_hci_power_on(hdev);
		mgmt_power_on(hdev, ret);
	}

	/* Init failed, cleanup */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->cmd_work);
	flush_work(&hdev->rx_work);

	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->rx_q);

	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	clear_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

	hdev->flags &= BIT(HCI_RAW);

	hci_req_sync_unlock(hdev);
}
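/* hci_dev_do_open() is shared by the power-on work item and by the
 * HCIDEVUP ioctl path (hci_dev_open() below); both rely on the
 * hci_req_sync_lock() taken above to serialise the entire bring-up
 * sequence.
 */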
1436 /* ---- HCI ioctl helpers ---- */
1438 int hci_dev_open(__u16 dev
)
1440 struct hci_dev
*hdev
;
1443 hdev
= hci_dev_get(dev
);
1447 /* Devices that are marked as unconfigured can only be powered
1448 * up as user channel. Trying to bring them up as normal devices
1449 * will result into a failure. Only user channel operation is
1452 * When this function is called for a user channel, the flag
1453 * HCI_USER_CHANNEL will be set first before attempting to
1456 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
) &&
1457 !hci_dev_test_flag(hdev
, HCI_USER_CHANNEL
)) {
1462 /* We need to ensure that no other power on/off work is pending
1463 * before proceeding to call hci_dev_do_open. This is
1464 * particularly important if the setup procedure has not yet
1467 if (hci_dev_test_and_clear_flag(hdev
, HCI_AUTO_OFF
))
1468 cancel_delayed_work(&hdev
->power_off
);
1470 /* After this call it is guaranteed that the setup procedure
1471 * has finished. This means that error conditions like RFKILL
1472 * or no valid public or static random address apply.
1474 flush_workqueue(hdev
->req_workqueue
);
1476 /* For controllers not using the management interface and that
1477 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1478 * so that pairing works for them. Once the management interface
1479 * is in use this bit will be cleared again and userspace has
1480 * to explicitly enable it.
1482 if (!hci_dev_test_flag(hdev
, HCI_USER_CHANNEL
) &&
1483 !hci_dev_test_flag(hdev
, HCI_MGMT
))
1484 hci_dev_set_flag(hdev
, HCI_BONDABLE
);
1486 err
= hci_dev_do_open(hdev
);
1493 /* This function requires the caller holds hdev->lock */
1494 static void hci_pend_le_actions_clear(struct hci_dev
*hdev
)
1496 struct hci_conn_params
*p
;
1498 list_for_each_entry(p
, &hdev
->le_conn_params
, list
) {
1500 hci_conn_drop(p
->conn
);
1501 hci_conn_put(p
->conn
);
1504 list_del_init(&p
->action
);
1507 BT_DBG("All LE pending actions cleared");
1510 int hci_dev_do_close(struct hci_dev
*hdev
)
1514 BT_DBG("%s %p", hdev
->name
, hdev
);
1516 if (!hci_dev_test_flag(hdev
, HCI_UNREGISTER
) &&
1517 !hci_dev_test_flag(hdev
, HCI_USER_CHANNEL
) &&
1518 test_bit(HCI_UP
, &hdev
->flags
)) {
1519 /* Execute vendor specific shutdown routine */
1521 hdev
->shutdown(hdev
);
1524 cancel_delayed_work(&hdev
->power_off
);
1526 hci_request_cancel_all(hdev
);
1527 hci_req_sync_lock(hdev
);
1529 if (!test_and_clear_bit(HCI_UP
, &hdev
->flags
)) {
1530 cancel_delayed_work_sync(&hdev
->cmd_timer
);
1531 hci_req_sync_unlock(hdev
);
1535 /* Flush RX and TX works */
1536 flush_work(&hdev
->tx_work
);
1537 flush_work(&hdev
->rx_work
);
1539 if (hdev
->discov_timeout
> 0) {
1540 hdev
->discov_timeout
= 0;
1541 hci_dev_clear_flag(hdev
, HCI_DISCOVERABLE
);
1542 hci_dev_clear_flag(hdev
, HCI_LIMITED_DISCOVERABLE
);
1545 if (hci_dev_test_and_clear_flag(hdev
, HCI_SERVICE_CACHE
))
1546 cancel_delayed_work(&hdev
->service_cache
);
1548 if (hci_dev_test_flag(hdev
, HCI_MGMT
))
1549 cancel_delayed_work_sync(&hdev
->rpa_expired
);
1551 /* Avoid potential lockdep warnings from the *_flush() calls by
1552 * ensuring the workqueue is empty up front.
1554 drain_workqueue(hdev
->workqueue
);
1558 hci_discovery_set_state(hdev
, DISCOVERY_STOPPED
);
1560 auto_off
= hci_dev_test_and_clear_flag(hdev
, HCI_AUTO_OFF
);
1562 if (!auto_off
&& hdev
->dev_type
== HCI_BREDR
&&
1563 hci_dev_test_flag(hdev
, HCI_MGMT
))
1564 __mgmt_power_off(hdev
);
1566 hci_inquiry_cache_flush(hdev
);
1567 hci_pend_le_actions_clear(hdev
);
1568 hci_conn_hash_flush(hdev
);
1569 hci_dev_unlock(hdev
);
1571 smp_unregister(hdev
);
1573 hci_sock_dev_event(hdev
, HCI_DEV_DOWN
);
1579 skb_queue_purge(&hdev
->cmd_q
);
1580 atomic_set(&hdev
->cmd_cnt
, 1);
1581 if (test_bit(HCI_QUIRK_RESET_ON_CLOSE
, &hdev
->quirks
) &&
1582 !auto_off
&& !hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
)) {
1583 set_bit(HCI_INIT
, &hdev
->flags
);
1584 __hci_req_sync(hdev
, hci_reset_req
, 0, HCI_CMD_TIMEOUT
, NULL
);
1585 clear_bit(HCI_INIT
, &hdev
->flags
);
1588 /* flush cmd work */
1589 flush_work(&hdev
->cmd_work
);
1592 skb_queue_purge(&hdev
->rx_q
);
1593 skb_queue_purge(&hdev
->cmd_q
);
1594 skb_queue_purge(&hdev
->raw_q
);
1596 /* Drop last sent command */
1597 if (hdev
->sent_cmd
) {
1598 cancel_delayed_work_sync(&hdev
->cmd_timer
);
1599 kfree_skb(hdev
->sent_cmd
);
1600 hdev
->sent_cmd
= NULL
;
1603 clear_bit(HCI_RUNNING
, &hdev
->flags
);
1604 hci_sock_dev_event(hdev
, HCI_DEV_CLOSE
);
1606 /* After this point our queues are empty
1607 * and no tasks are scheduled. */
1611 hdev
->flags
&= BIT(HCI_RAW
);
1612 hci_dev_clear_volatile_flags(hdev
);
1614 /* Controller radio is available but is currently powered down */
1615 hdev
->amp_status
= AMP_STATUS_POWERED_DOWN
;
1617 memset(hdev
->eir
, 0, sizeof(hdev
->eir
));
1618 memset(hdev
->dev_class
, 0, sizeof(hdev
->dev_class
));
1619 bacpy(&hdev
->random_addr
, BDADDR_ANY
);
1621 hci_req_sync_unlock(hdev
);
1627 int hci_dev_close(__u16 dev
)
1629 struct hci_dev
*hdev
;
1632 hdev
= hci_dev_get(dev
);
1636 if (hci_dev_test_flag(hdev
, HCI_USER_CHANNEL
)) {
1641 if (hci_dev_test_and_clear_flag(hdev
, HCI_AUTO_OFF
))
1642 cancel_delayed_work(&hdev
->power_off
);
1644 err
= hci_dev_do_close(hdev
);
1651 static int hci_dev_do_reset(struct hci_dev
*hdev
)
1655 BT_DBG("%s %p", hdev
->name
, hdev
);
1657 hci_req_sync_lock(hdev
);
1660 skb_queue_purge(&hdev
->rx_q
);
1661 skb_queue_purge(&hdev
->cmd_q
);
1663 /* Avoid potential lockdep warnings from the *_flush() calls by
1664 * ensuring the workqueue is empty up front.
1666 drain_workqueue(hdev
->workqueue
);
1669 hci_inquiry_cache_flush(hdev
);
1670 hci_conn_hash_flush(hdev
);
1671 hci_dev_unlock(hdev
);
1676 atomic_set(&hdev
->cmd_cnt
, 1);
1677 hdev
->acl_cnt
= 0; hdev
->sco_cnt
= 0; hdev
->le_cnt
= 0;
1679 ret
= __hci_req_sync(hdev
, hci_reset_req
, 0, HCI_INIT_TIMEOUT
, NULL
);
1681 hci_req_sync_unlock(hdev
);
1685 int hci_dev_reset(__u16 dev
)
1687 struct hci_dev
*hdev
;
1690 hdev
= hci_dev_get(dev
);
1694 if (!test_bit(HCI_UP
, &hdev
->flags
)) {
1699 if (hci_dev_test_flag(hdev
, HCI_USER_CHANNEL
)) {
1704 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
)) {
1709 err
= hci_dev_do_reset(hdev
);
1716 int hci_dev_reset_stat(__u16 dev
)
1718 struct hci_dev
*hdev
;
1721 hdev
= hci_dev_get(dev
);
1725 if (hci_dev_test_flag(hdev
, HCI_USER_CHANNEL
)) {
1730 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
)) {
1735 memset(&hdev
->stat
, 0, sizeof(struct hci_dev_stats
));
1742 static void hci_update_scan_state(struct hci_dev
*hdev
, u8 scan
)
1744 bool conn_changed
, discov_changed
;
1746 BT_DBG("%s scan 0x%02x", hdev
->name
, scan
);
1748 if ((scan
& SCAN_PAGE
))
1749 conn_changed
= !hci_dev_test_and_set_flag(hdev
,
1752 conn_changed
= hci_dev_test_and_clear_flag(hdev
,
1755 if ((scan
& SCAN_INQUIRY
)) {
1756 discov_changed
= !hci_dev_test_and_set_flag(hdev
,
1759 hci_dev_clear_flag(hdev
, HCI_LIMITED_DISCOVERABLE
);
1760 discov_changed
= hci_dev_test_and_clear_flag(hdev
,
1764 if (!hci_dev_test_flag(hdev
, HCI_MGMT
))
1767 if (conn_changed
|| discov_changed
) {
1768 /* In case this was disabled through mgmt */
1769 hci_dev_set_flag(hdev
, HCI_BREDR_ENABLED
);
1771 if (hci_dev_test_flag(hdev
, HCI_LE_ENABLED
))
1772 hci_req_update_adv_data(hdev
, hdev
->cur_adv_instance
);
1774 mgmt_new_settings(hdev
);
1778 int hci_dev_cmd(unsigned int cmd
, void __user
*arg
)
1780 struct hci_dev
*hdev
;
1781 struct hci_dev_req dr
;
1784 if (copy_from_user(&dr
, arg
, sizeof(dr
)))
1787 hdev
= hci_dev_get(dr
.dev_id
);
1791 if (hci_dev_test_flag(hdev
, HCI_USER_CHANNEL
)) {
1796 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
)) {
1801 if (hdev
->dev_type
!= HCI_BREDR
) {
1806 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
)) {
1813 err
= hci_req_sync(hdev
, hci_auth_req
, dr
.dev_opt
,
1814 HCI_INIT_TIMEOUT
, NULL
);
1818 if (!lmp_encrypt_capable(hdev
)) {
1823 if (!test_bit(HCI_AUTH
, &hdev
->flags
)) {
1824 /* Auth must be enabled first */
1825 err
= hci_req_sync(hdev
, hci_auth_req
, dr
.dev_opt
,
1826 HCI_INIT_TIMEOUT
, NULL
);
1831 err
= hci_req_sync(hdev
, hci_encrypt_req
, dr
.dev_opt
,
1832 HCI_INIT_TIMEOUT
, NULL
);
1836 err
= hci_req_sync(hdev
, hci_scan_req
, dr
.dev_opt
,
1837 HCI_INIT_TIMEOUT
, NULL
);
1839 /* Ensure that the connectable and discoverable states
1840 * get correctly modified as this was a non-mgmt change.
1843 hci_update_scan_state(hdev
, dr
.dev_opt
);
1847 err
= hci_req_sync(hdev
, hci_linkpol_req
, dr
.dev_opt
,
1848 HCI_INIT_TIMEOUT
, NULL
);
1851 case HCISETLINKMODE
:
1852 hdev
->link_mode
= ((__u16
) dr
.dev_opt
) &
1853 (HCI_LM_MASTER
| HCI_LM_ACCEPT
);
1857 hdev
->pkt_type
= (__u16
) dr
.dev_opt
;
1861 hdev
->acl_mtu
= *((__u16
*) &dr
.dev_opt
+ 1);
1862 hdev
->acl_pkts
= *((__u16
*) &dr
.dev_opt
+ 0);
1866 hdev
->sco_mtu
= *((__u16
*) &dr
.dev_opt
+ 1);
1867 hdev
->sco_pkts
= *((__u16
*) &dr
.dev_opt
+ 0);
1880 int hci_get_dev_list(void __user
*arg
)
1882 struct hci_dev
*hdev
;
1883 struct hci_dev_list_req
*dl
;
1884 struct hci_dev_req
*dr
;
1885 int n
= 0, size
, err
;
1888 if (get_user(dev_num
, (__u16 __user
*) arg
))
1891 if (!dev_num
|| dev_num
> (PAGE_SIZE
* 2) / sizeof(*dr
))
1894 size
= sizeof(*dl
) + dev_num
* sizeof(*dr
);
1896 dl
= kzalloc(size
, GFP_KERNEL
);
1902 read_lock(&hci_dev_list_lock
);
1903 list_for_each_entry(hdev
, &hci_dev_list
, list
) {
1904 unsigned long flags
= hdev
->flags
;
1906 /* When the auto-off is configured it means the transport
1907 * is running, but in that case still indicate that the
1908 * device is actually down.
1910 if (hci_dev_test_flag(hdev
, HCI_AUTO_OFF
))
1911 flags
&= ~BIT(HCI_UP
);
1913 (dr
+ n
)->dev_id
= hdev
->id
;
1914 (dr
+ n
)->dev_opt
= flags
;
1919 read_unlock(&hci_dev_list_lock
);
1922 size
= sizeof(*dl
) + n
* sizeof(*dr
);
1924 err
= copy_to_user(arg
, dl
, size
);
1927 return err
? -EFAULT
: 0;
1930 int hci_get_dev_info(void __user
*arg
)
1932 struct hci_dev
*hdev
;
1933 struct hci_dev_info di
;
1934 unsigned long flags
;
1937 if (copy_from_user(&di
, arg
, sizeof(di
)))
1940 hdev
= hci_dev_get(di
.dev_id
);
1944 /* When the auto-off is configured it means the transport
1945 * is running, but in that case still indicate that the
1946 * device is actually down.
1948 if (hci_dev_test_flag(hdev
, HCI_AUTO_OFF
))
1949 flags
= hdev
->flags
& ~BIT(HCI_UP
);
1951 flags
= hdev
->flags
;
1953 strcpy(di
.name
, hdev
->name
);
1954 di
.bdaddr
= hdev
->bdaddr
;
1955 di
.type
= (hdev
->bus
& 0x0f) | ((hdev
->dev_type
& 0x03) << 4);
1957 di
.pkt_type
= hdev
->pkt_type
;
1958 if (lmp_bredr_capable(hdev
)) {
1959 di
.acl_mtu
= hdev
->acl_mtu
;
1960 di
.acl_pkts
= hdev
->acl_pkts
;
1961 di
.sco_mtu
= hdev
->sco_mtu
;
1962 di
.sco_pkts
= hdev
->sco_pkts
;
1964 di
.acl_mtu
= hdev
->le_mtu
;
1965 di
.acl_pkts
= hdev
->le_pkts
;
1969 di
.link_policy
= hdev
->link_policy
;
1970 di
.link_mode
= hdev
->link_mode
;
1972 memcpy(&di
.stat
, &hdev
->stat
, sizeof(di
.stat
));
1973 memcpy(&di
.features
, &hdev
->features
, sizeof(di
.features
));
1975 if (copy_to_user(arg
, &di
, sizeof(di
)))
1983 /* ---- Interface to HCI drivers ---- */
1985 static int hci_rfkill_set_block(void *data
, bool blocked
)
1987 struct hci_dev
*hdev
= data
;
1989 BT_DBG("%p name %s blocked %d", hdev
, hdev
->name
, blocked
);
1991 if (hci_dev_test_flag(hdev
, HCI_USER_CHANNEL
))
1995 hci_dev_set_flag(hdev
, HCI_RFKILLED
);
1996 if (!hci_dev_test_flag(hdev
, HCI_SETUP
) &&
1997 !hci_dev_test_flag(hdev
, HCI_CONFIG
))
1998 hci_dev_do_close(hdev
);
2000 hci_dev_clear_flag(hdev
, HCI_RFKILLED
);
2006 static const struct rfkill_ops hci_rfkill_ops
= {
2007 .set_block
= hci_rfkill_set_block
,
2010 static void hci_power_on(struct work_struct
*work
)
2012 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, power_on
);
2015 BT_DBG("%s", hdev
->name
);
2017 if (test_bit(HCI_UP
, &hdev
->flags
) &&
2018 hci_dev_test_flag(hdev
, HCI_MGMT
) &&
2019 hci_dev_test_and_clear_flag(hdev
, HCI_AUTO_OFF
)) {
2020 hci_req_sync_lock(hdev
);
2021 err
= __hci_req_hci_power_on(hdev
);
2022 hci_req_sync_unlock(hdev
);
2023 mgmt_power_on(hdev
, err
);
2027 err
= hci_dev_do_open(hdev
);
2030 mgmt_set_powered_failed(hdev
, err
);
2031 hci_dev_unlock(hdev
);
2035 /* During the HCI setup phase, a few error conditions are
2036 * ignored and they need to be checked now. If they are still
2037 * valid, it is important to turn the device back off.
2039 if (hci_dev_test_flag(hdev
, HCI_RFKILLED
) ||
2040 hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
) ||
2041 (hdev
->dev_type
== HCI_BREDR
&&
2042 !bacmp(&hdev
->bdaddr
, BDADDR_ANY
) &&
2043 !bacmp(&hdev
->static_addr
, BDADDR_ANY
))) {
2044 hci_dev_clear_flag(hdev
, HCI_AUTO_OFF
);
2045 hci_dev_do_close(hdev
);
2046 } else if (hci_dev_test_flag(hdev
, HCI_AUTO_OFF
)) {
2047 queue_delayed_work(hdev
->req_workqueue
, &hdev
->power_off
,
2048 HCI_AUTO_OFF_TIMEOUT
);
2051 if (hci_dev_test_and_clear_flag(hdev
, HCI_SETUP
)) {
2052 /* For unconfigured devices, set the HCI_RAW flag
2053 * so that userspace can easily identify them.
2055 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
))
2056 set_bit(HCI_RAW
, &hdev
->flags
);
2058 /* For fully configured devices, this will send
2059 * the Index Added event. For unconfigured devices,
2060 * it will send Unconfigued Index Added event.
2062 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2063 * and no event will be send.
2065 mgmt_index_added(hdev
);
2066 } else if (hci_dev_test_and_clear_flag(hdev
, HCI_CONFIG
)) {
2067 /* When the controller is now configured, then it
2068 * is important to clear the HCI_RAW flag.
2070 if (!hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
))
2071 clear_bit(HCI_RAW
, &hdev
->flags
);
2073 /* Powering on the controller with HCI_CONFIG set only
2074 * happens with the transition from unconfigured to
2075 * configured. This will send the Index Added event.
2077 mgmt_index_added(hdev
);
2081 static void hci_power_off(struct work_struct
*work
)
2083 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
2086 BT_DBG("%s", hdev
->name
);
2088 hci_dev_do_close(hdev
);
2091 static void hci_error_reset(struct work_struct
*work
)
2093 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, error_reset
);
2095 BT_DBG("%s", hdev
->name
);
2098 hdev
->hw_error(hdev
, hdev
->hw_error_code
);
2100 BT_ERR("%s hardware error 0x%2.2x", hdev
->name
,
2101 hdev
->hw_error_code
);
2103 if (hci_dev_do_close(hdev
))
2106 hci_dev_do_open(hdev
);
2109 void hci_uuids_clear(struct hci_dev
*hdev
)
2111 struct bt_uuid
*uuid
, *tmp
;
2113 list_for_each_entry_safe(uuid
, tmp
, &hdev
->uuids
, list
) {
2114 list_del(&uuid
->list
);
2119 void hci_link_keys_clear(struct hci_dev
*hdev
)
2121 struct link_key
*key
;
2123 list_for_each_entry_rcu(key
, &hdev
->link_keys
, list
) {
2124 list_del_rcu(&key
->list
);
2125 kfree_rcu(key
, rcu
);
2129 void hci_smp_ltks_clear(struct hci_dev
*hdev
)
2133 list_for_each_entry_rcu(k
, &hdev
->long_term_keys
, list
) {
2134 list_del_rcu(&k
->list
);
2139 void hci_smp_irks_clear(struct hci_dev
*hdev
)
2143 list_for_each_entry_rcu(k
, &hdev
->identity_resolving_keys
, list
) {
2144 list_del_rcu(&k
->list
);
2149 struct link_key
*hci_find_link_key(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
2154 list_for_each_entry_rcu(k
, &hdev
->link_keys
, list
) {
2155 if (bacmp(bdaddr
, &k
->bdaddr
) == 0) {
2165 static bool hci_persistent_key(struct hci_dev
*hdev
, struct hci_conn
*conn
,
2166 u8 key_type
, u8 old_key_type
)
2169 if (key_type
< 0x03)
2172 /* Debug keys are insecure so don't store them persistently */
2173 if (key_type
== HCI_LK_DEBUG_COMBINATION
)
2176 /* Changed combination key and there's no previous one */
2177 if (key_type
== HCI_LK_CHANGED_COMBINATION
&& old_key_type
== 0xff)
2180 /* Security mode 3 case */
2184 /* BR/EDR key derived using SC from an LE link */
2185 if (conn
->type
== LE_LINK
)
2188 /* Neither local nor remote side had no-bonding as requirement */
2189 if (conn
->auth_type
> 0x01 && conn
->remote_auth
> 0x01)
2192 /* Local side had dedicated bonding as requirement */
2193 if (conn
->auth_type
== 0x02 || conn
->auth_type
== 0x03)
2196 /* Remote side had dedicated bonding as requirement */
2197 if (conn
->remote_auth
== 0x02 || conn
->remote_auth
== 0x03)
2200 /* If none of the above criteria match, then don't store the key
2205 static u8
ltk_role(u8 type
)
2207 if (type
== SMP_LTK
)
2208 return HCI_ROLE_MASTER
;
2210 return HCI_ROLE_SLAVE
;
2213 struct smp_ltk
*hci_find_ltk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
2214 u8 addr_type
, u8 role
)
2219 list_for_each_entry_rcu(k
, &hdev
->long_term_keys
, list
) {
2220 if (addr_type
!= k
->bdaddr_type
|| bacmp(bdaddr
, &k
->bdaddr
))
2223 if (smp_ltk_is_sc(k
) || ltk_role(k
->type
) == role
) {
2233 struct smp_irk
*hci_find_irk_by_rpa(struct hci_dev
*hdev
, bdaddr_t
*rpa
)
2235 struct smp_irk
*irk
;
2238 list_for_each_entry_rcu(irk
, &hdev
->identity_resolving_keys
, list
) {
2239 if (!bacmp(&irk
->rpa
, rpa
)) {
2245 list_for_each_entry_rcu(irk
, &hdev
->identity_resolving_keys
, list
) {
2246 if (smp_irk_matches(hdev
, irk
->val
, rpa
)) {
2247 bacpy(&irk
->rpa
, rpa
);
2257 struct smp_irk
*hci_find_irk_by_addr(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
2260 struct smp_irk
*irk
;
2262 /* Identity Address must be public or static random */
2263 if (addr_type
== ADDR_LE_DEV_RANDOM
&& (bdaddr
->b
[5] & 0xc0) != 0xc0)
2267 list_for_each_entry_rcu(irk
, &hdev
->identity_resolving_keys
, list
) {
2268 if (addr_type
== irk
->addr_type
&&
2269 bacmp(bdaddr
, &irk
->bdaddr
) == 0) {
2279 struct link_key
*hci_add_link_key(struct hci_dev
*hdev
, struct hci_conn
*conn
,
2280 bdaddr_t
*bdaddr
, u8
*val
, u8 type
,
2281 u8 pin_len
, bool *persistent
)
2283 struct link_key
*key
, *old_key
;
2286 old_key
= hci_find_link_key(hdev
, bdaddr
);
2288 old_key_type
= old_key
->type
;
2291 old_key_type
= conn
? conn
->key_type
: 0xff;
2292 key
= kzalloc(sizeof(*key
), GFP_KERNEL
);
2295 list_add_rcu(&key
->list
, &hdev
->link_keys
);
2298 BT_DBG("%s key for %pMR type %u", hdev
->name
, bdaddr
, type
);
2300 /* Some buggy controller combinations generate a changed
2301 * combination key for legacy pairing even when there's no
2303 if (type
== HCI_LK_CHANGED_COMBINATION
&&
2304 (!conn
|| conn
->remote_auth
== 0xff) && old_key_type
== 0xff) {
2305 type
= HCI_LK_COMBINATION
;
2307 conn
->key_type
= type
;
2310 bacpy(&key
->bdaddr
, bdaddr
);
2311 memcpy(key
->val
, val
, HCI_LINK_KEY_SIZE
);
2312 key
->pin_len
= pin_len
;
2314 if (type
== HCI_LK_CHANGED_COMBINATION
)
2315 key
->type
= old_key_type
;
2320 *persistent
= hci_persistent_key(hdev
, conn
, type
,
2326 struct smp_ltk
*hci_add_ltk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
2327 u8 addr_type
, u8 type
, u8 authenticated
,
2328 u8 tk
[16], u8 enc_size
, __le16 ediv
, __le64 rand
)
2330 struct smp_ltk
*key
, *old_key
;
2331 u8 role
= ltk_role(type
);
2333 old_key
= hci_find_ltk(hdev
, bdaddr
, addr_type
, role
);
2337 key
= kzalloc(sizeof(*key
), GFP_KERNEL
);
2340 list_add_rcu(&key
->list
, &hdev
->long_term_keys
);
2343 bacpy(&key
->bdaddr
, bdaddr
);
2344 key
->bdaddr_type
= addr_type
;
2345 memcpy(key
->val
, tk
, sizeof(key
->val
));
2346 key
->authenticated
= authenticated
;
2349 key
->enc_size
= enc_size
;
2355 struct smp_irk
*hci_add_irk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
2356 u8 addr_type
, u8 val
[16], bdaddr_t
*rpa
)
2358 struct smp_irk
*irk
;
2360 irk
= hci_find_irk_by_addr(hdev
, bdaddr
, addr_type
);
2362 irk
= kzalloc(sizeof(*irk
), GFP_KERNEL
);
2366 bacpy(&irk
->bdaddr
, bdaddr
);
2367 irk
->addr_type
= addr_type
;
2369 list_add_rcu(&irk
->list
, &hdev
->identity_resolving_keys
);
2372 memcpy(irk
->val
, val
, 16);
2373 bacpy(&irk
->rpa
, rpa
);
2378 int hci_remove_link_key(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
2380 struct link_key
*key
;
2382 key
= hci_find_link_key(hdev
, bdaddr
);
2386 BT_DBG("%s removing %pMR", hdev
->name
, bdaddr
);
2388 list_del_rcu(&key
->list
);
2389 kfree_rcu(key
, rcu
);
2394 int hci_remove_ltk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 bdaddr_type
)
2399 list_for_each_entry_rcu(k
, &hdev
->long_term_keys
, list
) {
2400 if (bacmp(bdaddr
, &k
->bdaddr
) || k
->bdaddr_type
!= bdaddr_type
)
2403 BT_DBG("%s removing %pMR", hdev
->name
, bdaddr
);
2405 list_del_rcu(&k
->list
);
2410 return removed
? 0 : -ENOENT
;
2413 void hci_remove_irk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 addr_type
)
2417 list_for_each_entry_rcu(k
, &hdev
->identity_resolving_keys
, list
) {
2418 if (bacmp(bdaddr
, &k
->bdaddr
) || k
->addr_type
!= addr_type
)
2421 BT_DBG("%s removing %pMR", hdev
->name
, bdaddr
);
2423 list_del_rcu(&k
->list
);
2428 bool hci_bdaddr_is_paired(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
2431 struct smp_irk
*irk
;
2434 if (type
== BDADDR_BREDR
) {
2435 if (hci_find_link_key(hdev
, bdaddr
))
2440 /* Convert to HCI addr type which struct smp_ltk uses */
2441 if (type
== BDADDR_LE_PUBLIC
)
2442 addr_type
= ADDR_LE_DEV_PUBLIC
;
2444 addr_type
= ADDR_LE_DEV_RANDOM
;
2446 irk
= hci_get_irk(hdev
, bdaddr
, addr_type
);
2448 bdaddr
= &irk
->bdaddr
;
2449 addr_type
= irk
->addr_type
;
2453 list_for_each_entry_rcu(k
, &hdev
->long_term_keys
, list
) {
2454 if (k
->bdaddr_type
== addr_type
&& !bacmp(bdaddr
, &k
->bdaddr
)) {
/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list) {
		if (bacmp(bdaddr, &data->bdaddr) != 0)
			continue;
		if (data->bdaddr_type != bdaddr_type)
			continue;
		return data;
	}

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 bdaddr_type)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
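/* Editor's note (illustrative, not part of the original file): data->present
 * above ends up as a small bitmask describing which OOB value pairs are
 * valid, with bit 0 covering the P-192 pair (hash192/rand192) and bit 1 the
 * P-256 pair (hash256/rand256): 0x00 neither, 0x01 P-192 only, 0x02 P-256
 * only, 0x03 both.  A minimal sketch of reading it back; the helper name is
 * hypothetical. */
static inline bool example_oob_has_p256(const struct oob_data *data)
{
	return data->present & 0x02;
}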
/* This function requires the caller holds hdev->lock */
struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		if (adv_instance->instance == instance)
			return adv_instance;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *cur_instance;

	cur_instance = hci_find_adv_instance(hdev, instance);
	if (!cur_instance)
		return NULL;

	if (cur_instance == list_last_entry(&hdev->adv_instances,
					    struct adv_info, list))
		return list_first_entry(&hdev->adv_instances,
					struct adv_info, list);
	else
		return list_next_entry(cur_instance, list);
}
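/* Illustrative sketch (not part of the original file): because
 * hci_get_next_instance() wraps from the last entry back to the first, a
 * caller holding hdev->lock can rotate through advertising instances with
 * something like the following; the helper name is hypothetical. */
static u8 example_next_adv_instance(struct hci_dev *hdev, u8 cur)
{
	struct adv_info *next = hci_get_next_instance(hdev, cur);

	return next ? next->instance : 0x00;
}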
/* This function requires the caller holds hdev->lock */
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	BT_DBG("%s removing %d", hdev->name, instance);

	if (hdev->cur_adv_instance == instance) {
		if (hdev->adv_instance_timeout) {
			cancel_delayed_work(&hdev->adv_instance_expire);
			hdev->adv_instance_timeout = 0;
		}
		hdev->cur_adv_instance = 0x00;
	}

	list_del(&adv_instance->list);
	kfree(adv_instance);

	hdev->adv_instance_cnt--;

	return 0;
}
/* This function requires the caller holds hdev->lock */
void hci_adv_instances_clear(struct hci_dev *hdev)
{
	struct adv_info *adv_instance, *n;

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		list_del(&adv_instance->list);
		kfree(adv_instance);
	}

	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
}
/* This function requires the caller holds hdev->lock */
int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
			 u16 adv_data_len, u8 *adv_data,
			 u16 scan_rsp_len, u8 *scan_rsp_data,
			 u16 timeout, u16 duration)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (adv_instance) {
		memset(adv_instance->adv_data, 0,
		       sizeof(adv_instance->adv_data));
		memset(adv_instance->scan_rsp_data, 0,
		       sizeof(adv_instance->scan_rsp_data));
	} else {
		if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
		    instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
			return -EOVERFLOW;

		adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
		if (!adv_instance)
			return -ENOMEM;

		adv_instance->pending = true;
		adv_instance->instance = instance;
		list_add(&adv_instance->list, &hdev->adv_instances);
		hdev->adv_instance_cnt++;
	}

	adv_instance->flags = flags;
	adv_instance->adv_data_len = adv_data_len;
	adv_instance->scan_rsp_len = scan_rsp_len;

	if (adv_data_len)
		memcpy(adv_instance->adv_data, adv_data, adv_data_len);

	if (scan_rsp_len)
		memcpy(adv_instance->scan_rsp_data,
		       scan_rsp_data, scan_rsp_len);

	adv_instance->timeout = timeout;
	adv_instance->remaining_time = timeout;

	if (duration == 0)
		adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
	else
		adv_instance->duration = duration;

	BT_DBG("%s for %d", hdev->name, instance);

	return 0;
}
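/* Illustrative usage sketch (not part of the original file): registering a
 * single advertising instance whose advertising data carries one complete
 * 16-bit service UUID (0x180d).  The instance number, flags, timeout and
 * duration below are example values and the helper name is hypothetical;
 * the caller must hold hdev->lock as noted above. */
static int example_add_instance(struct hci_dev *hdev)
{
	u8 adv_data[] = { 0x03, 0x03, 0x0d, 0x18 };

	return hci_add_adv_instance(hdev, 0x01, 0, sizeof(adv_data), adv_data,
				    0, NULL, 0, 0);
}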
2701 struct bdaddr_list
*hci_bdaddr_list_lookup(struct list_head
*bdaddr_list
,
2702 bdaddr_t
*bdaddr
, u8 type
)
2704 struct bdaddr_list
*b
;
2706 list_for_each_entry(b
, bdaddr_list
, list
) {
2707 if (!bacmp(&b
->bdaddr
, bdaddr
) && b
->bdaddr_type
== type
)
2714 void hci_bdaddr_list_clear(struct list_head
*bdaddr_list
)
2716 struct bdaddr_list
*b
, *n
;
2718 list_for_each_entry_safe(b
, n
, bdaddr_list
, list
) {
2724 int hci_bdaddr_list_add(struct list_head
*list
, bdaddr_t
*bdaddr
, u8 type
)
2726 struct bdaddr_list
*entry
;
2728 if (!bacmp(bdaddr
, BDADDR_ANY
))
2731 if (hci_bdaddr_list_lookup(list
, bdaddr
, type
))
2734 entry
= kzalloc(sizeof(*entry
), GFP_KERNEL
);
2738 bacpy(&entry
->bdaddr
, bdaddr
);
2739 entry
->bdaddr_type
= type
;
2741 list_add(&entry
->list
, list
);
2746 int hci_bdaddr_list_del(struct list_head
*list
, bdaddr_t
*bdaddr
, u8 type
)
2748 struct bdaddr_list
*entry
;
2750 if (!bacmp(bdaddr
, BDADDR_ANY
)) {
2751 hci_bdaddr_list_clear(list
);
2755 entry
= hci_bdaddr_list_lookup(list
, bdaddr
, type
);
2759 list_del(&entry
->list
);
/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
			return params;
		}
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
						  bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *param;

	list_for_each_entry(param, list, action) {
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type)
			return param;
	}

	return NULL;
}
/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	INIT_LIST_HEAD(&params->action);

	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}
static void hci_conn_params_free(struct hci_conn_params *params)
{
	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
	}

	list_del(&params->action);
	list_del(&params->list);
	kfree(params);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_conn_params_free(params);

	hci_update_background_scan(hdev);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}
/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;

		/* If trying to establish one time connection to disabled
		 * device, leave the params, but mark them as just once.
		 */
		if (params->explicit_connect) {
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
			continue;
		}

		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}
/* This function requires the caller holds hdev->lock */
static void hci_conn_params_clear_all(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
		hci_conn_params_free(params);

	BT_DBG("All LE connection parameters were removed");
}
/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 *
 * In case BR/EDR has been disabled on a dual-mode controller and
 * userspace has configured a static address, then that address
 * becomes the identity address instead of the public BR/EDR address.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}
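/* Illustrative usage sketch (not part of the original file): a caller that
 * needs the controller's identity address passes stack variables and gets
 * back the selected address together with its LE address type.  The helper
 * name is hypothetical. */
static void example_log_identity(struct hci_dev *hdev)
{
	bdaddr_t addr;
	u8 addr_type;

	hci_copy_identity_address(hdev, &addr, &addr_type);
	BT_DBG("identity %pMR (type %u)", &addr, addr_type);
}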
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
	hdev->adv_instance_timeout = 0;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);
	INIT_LIST_HEAD(&hdev->adv_instances);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_request_setup(hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
3016 /* Register HCI device */
3017 int hci_register_dev(struct hci_dev
*hdev
)
3021 if (!hdev
->open
|| !hdev
->close
|| !hdev
->send
)
3024 /* Do not allow HCI_AMP devices to register at index 0,
3025 * so the index can be used as the AMP controller ID.
3027 switch (hdev
->dev_type
) {
3029 id
= ida_simple_get(&hci_index_ida
, 0, 0, GFP_KERNEL
);
3032 id
= ida_simple_get(&hci_index_ida
, 1, 0, GFP_KERNEL
);
3041 sprintf(hdev
->name
, "hci%d", id
);
3044 BT_DBG("%p name %s bus %d", hdev
, hdev
->name
, hdev
->bus
);
3046 hdev
->workqueue
= alloc_workqueue("%s", WQ_HIGHPRI
| WQ_UNBOUND
|
3047 WQ_MEM_RECLAIM
, 1, hdev
->name
);
3048 if (!hdev
->workqueue
) {
3053 hdev
->req_workqueue
= alloc_workqueue("%s", WQ_HIGHPRI
| WQ_UNBOUND
|
3054 WQ_MEM_RECLAIM
, 1, hdev
->name
);
3055 if (!hdev
->req_workqueue
) {
3056 destroy_workqueue(hdev
->workqueue
);
3061 if (!IS_ERR_OR_NULL(bt_debugfs
))
3062 hdev
->debugfs
= debugfs_create_dir(hdev
->name
, bt_debugfs
);
3064 dev_set_name(&hdev
->dev
, "%s", hdev
->name
);
3066 error
= device_add(&hdev
->dev
);
3070 hdev
->rfkill
= rfkill_alloc(hdev
->name
, &hdev
->dev
,
3071 RFKILL_TYPE_BLUETOOTH
, &hci_rfkill_ops
,
3074 if (rfkill_register(hdev
->rfkill
) < 0) {
3075 rfkill_destroy(hdev
->rfkill
);
3076 hdev
->rfkill
= NULL
;
3080 if (hdev
->rfkill
&& rfkill_blocked(hdev
->rfkill
))
3081 hci_dev_set_flag(hdev
, HCI_RFKILLED
);
3083 hci_dev_set_flag(hdev
, HCI_SETUP
);
3084 hci_dev_set_flag(hdev
, HCI_AUTO_OFF
);
3086 if (hdev
->dev_type
== HCI_BREDR
) {
3087 /* Assume BR/EDR support until proven otherwise (such as
3088 * through reading supported features during init.
3090 hci_dev_set_flag(hdev
, HCI_BREDR_ENABLED
);
3093 write_lock(&hci_dev_list_lock
);
3094 list_add(&hdev
->list
, &hci_dev_list
);
3095 write_unlock(&hci_dev_list_lock
);
3097 /* Devices that are marked for raw-only usage are unconfigured
3098 * and should not be included in normal operation.
3100 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &hdev
->quirks
))
3101 hci_dev_set_flag(hdev
, HCI_UNCONFIGURED
);
3103 hci_sock_dev_event(hdev
, HCI_DEV_REG
);
3106 queue_work(hdev
->req_workqueue
, &hdev
->power_on
);
3111 destroy_workqueue(hdev
->workqueue
);
3112 destroy_workqueue(hdev
->req_workqueue
);
3114 ida_simple_remove(&hci_index_ida
, hdev
->id
);
3118 EXPORT_SYMBOL(hci_register_dev
);
3120 /* Unregister HCI device */
3121 void hci_unregister_dev(struct hci_dev
*hdev
)
3125 BT_DBG("%p name %s bus %d", hdev
, hdev
->name
, hdev
->bus
);
3127 hci_dev_set_flag(hdev
, HCI_UNREGISTER
);
3131 write_lock(&hci_dev_list_lock
);
3132 list_del(&hdev
->list
);
3133 write_unlock(&hci_dev_list_lock
);
3135 hci_dev_do_close(hdev
);
3137 cancel_work_sync(&hdev
->power_on
);
3139 if (!test_bit(HCI_INIT
, &hdev
->flags
) &&
3140 !hci_dev_test_flag(hdev
, HCI_SETUP
) &&
3141 !hci_dev_test_flag(hdev
, HCI_CONFIG
)) {
3143 mgmt_index_removed(hdev
);
3144 hci_dev_unlock(hdev
);
3147 /* mgmt_index_removed should take care of emptying the
3149 BUG_ON(!list_empty(&hdev
->mgmt_pending
));
3151 hci_sock_dev_event(hdev
, HCI_DEV_UNREG
);
3154 rfkill_unregister(hdev
->rfkill
);
3155 rfkill_destroy(hdev
->rfkill
);
3158 device_del(&hdev
->dev
);
3160 debugfs_remove_recursive(hdev
->debugfs
);
3162 destroy_workqueue(hdev
->workqueue
);
3163 destroy_workqueue(hdev
->req_workqueue
);
3166 hci_bdaddr_list_clear(&hdev
->blacklist
);
3167 hci_bdaddr_list_clear(&hdev
->whitelist
);
3168 hci_uuids_clear(hdev
);
3169 hci_link_keys_clear(hdev
);
3170 hci_smp_ltks_clear(hdev
);
3171 hci_smp_irks_clear(hdev
);
3172 hci_remote_oob_data_clear(hdev
);
3173 hci_adv_instances_clear(hdev
);
3174 hci_bdaddr_list_clear(&hdev
->le_white_list
);
3175 hci_conn_params_clear_all(hdev
);
3176 hci_discovery_filter_clear(hdev
);
3177 hci_dev_unlock(hdev
);
3181 ida_simple_remove(&hci_index_ida
, id
);
3183 EXPORT_SYMBOL(hci_unregister_dev
);
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Reset HCI device */
int hci_reset_dev(struct hci_dev *hdev)
{
	const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
	struct sk_buff *skb;

	skb = bt_skb_alloc(3, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	memcpy(skb_put(skb, 3), hw_err, 3);

	/* Send Hardware Error to upper stack */
	return hci_recv_frame(hdev, skb);
}
EXPORT_SYMBOL(hci_reset_dev);
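/* Editor's note (illustrative, not part of the original file): the three
 * bytes queued by hci_reset_dev() form a complete HCI event packet: the
 * event code (HCI_EV_HARDWARE_ERROR), a parameter total length of 0x01,
 * and a single hardware-code parameter of 0x00.  Laid over the generic
 * event header it is roughly:
 *
 *	struct hci_event_hdr hdr = {
 *		.evt  = HCI_EV_HARDWARE_ERROR,
 *		.plen = 0x01,
 *	};
 *	u8 hw_code = 0x00;
 */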
/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
	    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
	    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

/* Receive diagnostic message from HCI drivers */
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Mark as diagnostic packet */
	hci_skb_pkt_type(skb) = HCI_DIAG_PKT;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_diag);
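/* Illustrative driver-side sketch (not part of the original file): a
 * transport driver that has read a complete HCI packet of 'len' bytes into
 * 'buf' would typically wrap it in an skb, set the packet type and hand it
 * to the core as shown below.  Function and variable names here are
 * hypothetical. */
static int example_driver_rx(struct hci_dev *hdev, const void *buf,
			     size_t len, u8 pkt_type)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hci_skb_pkt_type(skb) = pkt_type;
	memcpy(skb_put(skb, len), buf, len);

	return hci_recv_frame(hdev, skb);
}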
3264 /* ---- Interface to upper protocols ---- */
3266 int hci_register_cb(struct hci_cb
*cb
)
3268 BT_DBG("%p name %s", cb
, cb
->name
);
3270 mutex_lock(&hci_cb_list_lock
);
3271 list_add_tail(&cb
->list
, &hci_cb_list
);
3272 mutex_unlock(&hci_cb_list_lock
);
3276 EXPORT_SYMBOL(hci_register_cb
);
3278 int hci_unregister_cb(struct hci_cb
*cb
)
3280 BT_DBG("%p name %s", cb
, cb
->name
);
3282 mutex_lock(&hci_cb_list_lock
);
3283 list_del(&cb
->list
);
3284 mutex_unlock(&hci_cb_list_lock
);
3288 EXPORT_SYMBOL(hci_unregister_cb
);
3290 static void hci_send_frame(struct hci_dev
*hdev
, struct sk_buff
*skb
)
3294 BT_DBG("%s type %d len %d", hdev
->name
, hci_skb_pkt_type(skb
),
3298 __net_timestamp(skb
);
3300 /* Send copy to monitor */
3301 hci_send_to_monitor(hdev
, skb
);
3303 if (atomic_read(&hdev
->promisc
)) {
3304 /* Send copy to the sockets */
3305 hci_send_to_sock(hdev
, skb
);
3308 /* Get rid of skb owner, prior to sending to the driver. */
3311 if (!test_bit(HCI_RUNNING
, &hdev
->flags
)) {
3316 err
= hdev
->send(hdev
, skb
);
3318 BT_ERR("%s sending frame failed (%d)", hdev
->name
, err
);
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
3348 /* Get data from the previously sent command */
3349 void *hci_sent_cmd_data(struct hci_dev
*hdev
, __u16 opcode
)
3351 struct hci_command_hdr
*hdr
;
3353 if (!hdev
->sent_cmd
)
3356 hdr
= (void *) hdev
->sent_cmd
->data
;
3358 if (hdr
->opcode
!= cpu_to_le16(opcode
))
3361 BT_DBG("%s opcode 0x%4.4x", hdev
->name
, opcode
);
3363 return hdev
->sent_cmd
->data
+ HCI_COMMAND_HDR_SIZE
;
/* Send HCI command and wait for command complete event */
struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			     const void *param, u32 timeout)
{
	struct sk_buff *skb;

	if (!test_bit(HCI_UP, &hdev->flags))
		return ERR_PTR(-ENETDOWN);

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	hci_req_sync_lock(hdev);
	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
	hci_req_sync_unlock(hdev);

	return skb;
}
EXPORT_SYMBOL(hci_cmd_sync);
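/* Illustrative usage sketch (not part of the original file): issuing a
 * simple command synchronously and consuming the returned event skb.
 * HCI_OP_READ_LOCAL_VERSION and the 1-second timeout are example choices,
 * and the helper name is hypothetical. */
static void example_read_version(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
			   msecs_to_jiffies(1000));
	if (IS_ERR(skb))
		return;

	/* skb->data holds the command complete parameters */
	kfree_skb(skb);
}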
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
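/* Editor's note (illustrative, not part of the original file): the ACL
 * header packs the 12-bit connection handle and the 2-bit packet boundary /
 * broadcast flags into one 16-bit field.  For example, handle 0x002a sent
 * with ACL_START (0x02) works out to:
 *
 *	hci_handle_pack(0x002a, 0x02) == (0x002a & 0x0fff) | (0x02 << 12)
 *				      == 0x202a
 *
 * The receive path recovers the two pieces with hci_handle() and
 * hci_flags(), as hci_acldata_packet() does further below.
 */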
3398 static void hci_queue_acl(struct hci_chan
*chan
, struct sk_buff_head
*queue
,
3399 struct sk_buff
*skb
, __u16 flags
)
3401 struct hci_conn
*conn
= chan
->conn
;
3402 struct hci_dev
*hdev
= conn
->hdev
;
3403 struct sk_buff
*list
;
3405 skb
->len
= skb_headlen(skb
);
3408 hci_skb_pkt_type(skb
) = HCI_ACLDATA_PKT
;
3410 switch (hdev
->dev_type
) {
3412 hci_add_acl_hdr(skb
, conn
->handle
, flags
);
3415 hci_add_acl_hdr(skb
, chan
->handle
, flags
);
3418 BT_ERR("%s unknown dev_type %d", hdev
->name
, hdev
->dev_type
);
3422 list
= skb_shinfo(skb
)->frag_list
;
3424 /* Non fragmented */
3425 BT_DBG("%s nonfrag skb %p len %d", hdev
->name
, skb
, skb
->len
);
3427 skb_queue_tail(queue
, skb
);
3430 BT_DBG("%s frag %p len %d", hdev
->name
, skb
, skb
->len
);
3432 skb_shinfo(skb
)->frag_list
= NULL
;
3434 /* Queue all fragments atomically. We need to use spin_lock_bh
3435 * here because of 6LoWPAN links, as there this function is
3436 * called from softirq and using normal spin lock could cause
3439 spin_lock_bh(&queue
->lock
);
3441 __skb_queue_tail(queue
, skb
);
3443 flags
&= ~ACL_START
;
3446 skb
= list
; list
= list
->next
;
3448 hci_skb_pkt_type(skb
) = HCI_ACLDATA_PKT
;
3449 hci_add_acl_hdr(skb
, conn
->handle
, flags
);
3451 BT_DBG("%s frag %p len %d", hdev
->name
, skb
, skb
->len
);
3453 __skb_queue_tail(queue
, skb
);
3456 spin_unlock_bh(&queue
->lock
);
3460 void hci_send_acl(struct hci_chan
*chan
, struct sk_buff
*skb
, __u16 flags
)
3462 struct hci_dev
*hdev
= chan
->conn
->hdev
;
3464 BT_DBG("%s chan %p flags 0x%4.4x", hdev
->name
, chan
, flags
);
3466 hci_queue_acl(chan
, &chan
->data_q
, skb
, flags
);
3468 queue_work(hdev
->workqueue
, &hdev
->tx_work
);
3472 void hci_send_sco(struct hci_conn
*conn
, struct sk_buff
*skb
)
3474 struct hci_dev
*hdev
= conn
->hdev
;
3475 struct hci_sco_hdr hdr
;
3477 BT_DBG("%s len %d", hdev
->name
, skb
->len
);
3479 hdr
.handle
= cpu_to_le16(conn
->handle
);
3480 hdr
.dlen
= skb
->len
;
3482 skb_push(skb
, HCI_SCO_HDR_SIZE
);
3483 skb_reset_transport_header(skb
);
3484 memcpy(skb_transport_header(skb
), &hdr
, HCI_SCO_HDR_SIZE
);
3486 hci_skb_pkt_type(skb
) = HCI_SCODATA_PKT
;
3488 skb_queue_tail(&conn
->data_q
, skb
);
3489 queue_work(hdev
->workqueue
, &hdev
->tx_work
);
3492 /* ---- HCI TX task (outgoing data) ---- */
3494 /* HCI Connection scheduler */
3495 static struct hci_conn
*hci_low_sent(struct hci_dev
*hdev
, __u8 type
,
3498 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
3499 struct hci_conn
*conn
= NULL
, *c
;
3500 unsigned int num
= 0, min
= ~0;
3502 /* We don't have to lock device here. Connections are always
3503 * added and removed with TX task disabled. */
3507 list_for_each_entry_rcu(c
, &h
->list
, list
) {
3508 if (c
->type
!= type
|| skb_queue_empty(&c
->data_q
))
3511 if (c
->state
!= BT_CONNECTED
&& c
->state
!= BT_CONFIG
)
3516 if (c
->sent
< min
) {
3521 if (hci_conn_num(hdev
, type
) == num
)
3530 switch (conn
->type
) {
3532 cnt
= hdev
->acl_cnt
;
3536 cnt
= hdev
->sco_cnt
;
3539 cnt
= hdev
->le_mtu
? hdev
->le_cnt
: hdev
->acl_cnt
;
3543 BT_ERR("Unknown link type");
3551 BT_DBG("conn %p quote %d", conn
, *quote
);
3555 static void hci_link_tx_to(struct hci_dev
*hdev
, __u8 type
)
3557 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
3560 BT_ERR("%s link tx timeout", hdev
->name
);
3564 /* Kill stalled connections */
3565 list_for_each_entry_rcu(c
, &h
->list
, list
) {
3566 if (c
->type
== type
&& c
->sent
) {
3567 BT_ERR("%s killing stalled connection %pMR",
3568 hdev
->name
, &c
->dst
);
3569 hci_disconnect(c
, HCI_ERROR_REMOTE_USER_TERM
);
3576 static struct hci_chan
*hci_chan_sent(struct hci_dev
*hdev
, __u8 type
,
3579 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
3580 struct hci_chan
*chan
= NULL
;
3581 unsigned int num
= 0, min
= ~0, cur_prio
= 0;
3582 struct hci_conn
*conn
;
3583 int cnt
, q
, conn_num
= 0;
3585 BT_DBG("%s", hdev
->name
);
3589 list_for_each_entry_rcu(conn
, &h
->list
, list
) {
3590 struct hci_chan
*tmp
;
3592 if (conn
->type
!= type
)
3595 if (conn
->state
!= BT_CONNECTED
&& conn
->state
!= BT_CONFIG
)
3600 list_for_each_entry_rcu(tmp
, &conn
->chan_list
, list
) {
3601 struct sk_buff
*skb
;
3603 if (skb_queue_empty(&tmp
->data_q
))
3606 skb
= skb_peek(&tmp
->data_q
);
3607 if (skb
->priority
< cur_prio
)
3610 if (skb
->priority
> cur_prio
) {
3613 cur_prio
= skb
->priority
;
3618 if (conn
->sent
< min
) {
3624 if (hci_conn_num(hdev
, type
) == conn_num
)
3633 switch (chan
->conn
->type
) {
3635 cnt
= hdev
->acl_cnt
;
3638 cnt
= hdev
->block_cnt
;
3642 cnt
= hdev
->sco_cnt
;
3645 cnt
= hdev
->le_mtu
? hdev
->le_cnt
: hdev
->acl_cnt
;
3649 BT_ERR("Unknown link type");
3654 BT_DBG("chan %p quote %d", chan
, *quote
);
3658 static void hci_prio_recalculate(struct hci_dev
*hdev
, __u8 type
)
3660 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
3661 struct hci_conn
*conn
;
3664 BT_DBG("%s", hdev
->name
);
3668 list_for_each_entry_rcu(conn
, &h
->list
, list
) {
3669 struct hci_chan
*chan
;
3671 if (conn
->type
!= type
)
3674 if (conn
->state
!= BT_CONNECTED
&& conn
->state
!= BT_CONFIG
)
3679 list_for_each_entry_rcu(chan
, &conn
->chan_list
, list
) {
3680 struct sk_buff
*skb
;
3687 if (skb_queue_empty(&chan
->data_q
))
3690 skb
= skb_peek(&chan
->data_q
);
3691 if (skb
->priority
>= HCI_PRIO_MAX
- 1)
3694 skb
->priority
= HCI_PRIO_MAX
- 1;
3696 BT_DBG("chan %p skb %p promoted to %d", chan
, skb
,
3700 if (hci_conn_num(hdev
, type
) == num
)
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
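/* Editor's note (illustrative, not part of the original file): the 40.9
 * second figure quoted above comes from the largest possible Link
 * Supervision Timeout parameter:
 *
 *	0xFFFF slots * 0.625 ms/slot = 40959.375 ms ~= 40.9 s
 *
 * so the ACL tx timeout used here (45 seconds, matching the HZ * 45 check
 * used for LE in hci_sched_le() further below) is safely longer than any
 * supervision timeout the controller can be configured with.
 */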
3725 static void hci_sched_acl_pkt(struct hci_dev
*hdev
)
3727 unsigned int cnt
= hdev
->acl_cnt
;
3728 struct hci_chan
*chan
;
3729 struct sk_buff
*skb
;
3732 __check_timeout(hdev
, cnt
);
3734 while (hdev
->acl_cnt
&&
3735 (chan
= hci_chan_sent(hdev
, ACL_LINK
, "e
))) {
3736 u32 priority
= (skb_peek(&chan
->data_q
))->priority
;
3737 while (quote
-- && (skb
= skb_peek(&chan
->data_q
))) {
3738 BT_DBG("chan %p skb %p len %d priority %u", chan
, skb
,
3739 skb
->len
, skb
->priority
);
3741 /* Stop if priority has changed */
3742 if (skb
->priority
< priority
)
3745 skb
= skb_dequeue(&chan
->data_q
);
3747 hci_conn_enter_active_mode(chan
->conn
,
3748 bt_cb(skb
)->force_active
);
3750 hci_send_frame(hdev
, skb
);
3751 hdev
->acl_last_tx
= jiffies
;
3759 if (cnt
!= hdev
->acl_cnt
)
3760 hci_prio_recalculate(hdev
, ACL_LINK
);
3763 static void hci_sched_acl_blk(struct hci_dev
*hdev
)
3765 unsigned int cnt
= hdev
->block_cnt
;
3766 struct hci_chan
*chan
;
3767 struct sk_buff
*skb
;
3771 __check_timeout(hdev
, cnt
);
3773 BT_DBG("%s", hdev
->name
);
3775 if (hdev
->dev_type
== HCI_AMP
)
3780 while (hdev
->block_cnt
> 0 &&
3781 (chan
= hci_chan_sent(hdev
, type
, "e
))) {
3782 u32 priority
= (skb_peek(&chan
->data_q
))->priority
;
3783 while (quote
> 0 && (skb
= skb_peek(&chan
->data_q
))) {
3786 BT_DBG("chan %p skb %p len %d priority %u", chan
, skb
,
3787 skb
->len
, skb
->priority
);
3789 /* Stop if priority has changed */
3790 if (skb
->priority
< priority
)
3793 skb
= skb_dequeue(&chan
->data_q
);
3795 blocks
= __get_blocks(hdev
, skb
);
3796 if (blocks
> hdev
->block_cnt
)
3799 hci_conn_enter_active_mode(chan
->conn
,
3800 bt_cb(skb
)->force_active
);
3802 hci_send_frame(hdev
, skb
);
3803 hdev
->acl_last_tx
= jiffies
;
3805 hdev
->block_cnt
-= blocks
;
3808 chan
->sent
+= blocks
;
3809 chan
->conn
->sent
+= blocks
;
3813 if (cnt
!= hdev
->block_cnt
)
3814 hci_prio_recalculate(hdev
, type
);
3817 static void hci_sched_acl(struct hci_dev
*hdev
)
3819 BT_DBG("%s", hdev
->name
);
3821 /* No ACL link over BR/EDR controller */
3822 if (!hci_conn_num(hdev
, ACL_LINK
) && hdev
->dev_type
== HCI_BREDR
)
3825 /* No AMP link over AMP controller */
3826 if (!hci_conn_num(hdev
, AMP_LINK
) && hdev
->dev_type
== HCI_AMP
)
3829 switch (hdev
->flow_ctl_mode
) {
3830 case HCI_FLOW_CTL_MODE_PACKET_BASED
:
3831 hci_sched_acl_pkt(hdev
);
3834 case HCI_FLOW_CTL_MODE_BLOCK_BASED
:
3835 hci_sched_acl_blk(hdev
);
3841 static void hci_sched_sco(struct hci_dev
*hdev
)
3843 struct hci_conn
*conn
;
3844 struct sk_buff
*skb
;
3847 BT_DBG("%s", hdev
->name
);
3849 if (!hci_conn_num(hdev
, SCO_LINK
))
3852 while (hdev
->sco_cnt
&& (conn
= hci_low_sent(hdev
, SCO_LINK
, "e
))) {
3853 while (quote
-- && (skb
= skb_dequeue(&conn
->data_q
))) {
3854 BT_DBG("skb %p len %d", skb
, skb
->len
);
3855 hci_send_frame(hdev
, skb
);
3858 if (conn
->sent
== ~0)
3864 static void hci_sched_esco(struct hci_dev
*hdev
)
3866 struct hci_conn
*conn
;
3867 struct sk_buff
*skb
;
3870 BT_DBG("%s", hdev
->name
);
3872 if (!hci_conn_num(hdev
, ESCO_LINK
))
3875 while (hdev
->sco_cnt
&& (conn
= hci_low_sent(hdev
, ESCO_LINK
,
3877 while (quote
-- && (skb
= skb_dequeue(&conn
->data_q
))) {
3878 BT_DBG("skb %p len %d", skb
, skb
->len
);
3879 hci_send_frame(hdev
, skb
);
3882 if (conn
->sent
== ~0)
3888 static void hci_sched_le(struct hci_dev
*hdev
)
3890 struct hci_chan
*chan
;
3891 struct sk_buff
*skb
;
3892 int quote
, cnt
, tmp
;
3894 BT_DBG("%s", hdev
->name
);
3896 if (!hci_conn_num(hdev
, LE_LINK
))
3899 if (!hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
)) {
3900 /* LE tx timeout must be longer than maximum
3901 * link supervision timeout (40.9 seconds) */
3902 if (!hdev
->le_cnt
&& hdev
->le_pkts
&&
3903 time_after(jiffies
, hdev
->le_last_tx
+ HZ
* 45))
3904 hci_link_tx_to(hdev
, LE_LINK
);
3907 cnt
= hdev
->le_pkts
? hdev
->le_cnt
: hdev
->acl_cnt
;
3909 while (cnt
&& (chan
= hci_chan_sent(hdev
, LE_LINK
, "e
))) {
3910 u32 priority
= (skb_peek(&chan
->data_q
))->priority
;
3911 while (quote
-- && (skb
= skb_peek(&chan
->data_q
))) {
3912 BT_DBG("chan %p skb %p len %d priority %u", chan
, skb
,
3913 skb
->len
, skb
->priority
);
3915 /* Stop if priority has changed */
3916 if (skb
->priority
< priority
)
3919 skb
= skb_dequeue(&chan
->data_q
);
3921 hci_send_frame(hdev
, skb
);
3922 hdev
->le_last_tx
= jiffies
;
3933 hdev
->acl_cnt
= cnt
;
3936 hci_prio_recalculate(hdev
, LE_LINK
);
3939 static void hci_tx_work(struct work_struct
*work
)
3941 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, tx_work
);
3942 struct sk_buff
*skb
;
3944 BT_DBG("%s acl %d sco %d le %d", hdev
->name
, hdev
->acl_cnt
,
3945 hdev
->sco_cnt
, hdev
->le_cnt
);
3947 if (!hci_dev_test_flag(hdev
, HCI_USER_CHANNEL
)) {
3948 /* Schedule queues and send stuff to HCI driver */
3949 hci_sched_acl(hdev
);
3950 hci_sched_sco(hdev
);
3951 hci_sched_esco(hdev
);
3955 /* Send next queued raw (unknown type) packet */
3956 while ((skb
= skb_dequeue(&hdev
->raw_q
)))
3957 hci_send_frame(hdev
, skb
);
3960 /* ----- HCI RX task (incoming data processing) ----- */
3962 /* ACL data packet */
3963 static void hci_acldata_packet(struct hci_dev
*hdev
, struct sk_buff
*skb
)
3965 struct hci_acl_hdr
*hdr
= (void *) skb
->data
;
3966 struct hci_conn
*conn
;
3967 __u16 handle
, flags
;
3969 skb_pull(skb
, HCI_ACL_HDR_SIZE
);
3971 handle
= __le16_to_cpu(hdr
->handle
);
3972 flags
= hci_flags(handle
);
3973 handle
= hci_handle(handle
);
3975 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev
->name
, skb
->len
,
3978 hdev
->stat
.acl_rx
++;
3981 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
3982 hci_dev_unlock(hdev
);
3985 hci_conn_enter_active_mode(conn
, BT_POWER_FORCE_ACTIVE_OFF
);
3987 /* Send to upper protocol */
3988 l2cap_recv_acldata(conn
, skb
, flags
);
3991 BT_ERR("%s ACL packet for unknown connection handle %d",
3992 hdev
->name
, handle
);
3998 /* SCO data packet */
3999 static void hci_scodata_packet(struct hci_dev
*hdev
, struct sk_buff
*skb
)
4001 struct hci_sco_hdr
*hdr
= (void *) skb
->data
;
4002 struct hci_conn
*conn
;
4005 skb_pull(skb
, HCI_SCO_HDR_SIZE
);
4007 handle
= __le16_to_cpu(hdr
->handle
);
4009 BT_DBG("%s len %d handle 0x%4.4x", hdev
->name
, skb
->len
, handle
);
4011 hdev
->stat
.sco_rx
++;
4014 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
4015 hci_dev_unlock(hdev
);
4018 /* Send to upper protocol */
4019 sco_recv_scodata(conn
, skb
);
4022 BT_ERR("%s SCO packet for unknown connection handle %d",
4023 hdev
->name
, handle
);
4029 static bool hci_req_is_complete(struct hci_dev
*hdev
)
4031 struct sk_buff
*skb
;
4033 skb
= skb_peek(&hdev
->cmd_q
);
4037 return (bt_cb(skb
)->hci
.req_flags
& HCI_REQ_START
);
4040 static void hci_resend_last(struct hci_dev
*hdev
)
4042 struct hci_command_hdr
*sent
;
4043 struct sk_buff
*skb
;
4046 if (!hdev
->sent_cmd
)
4049 sent
= (void *) hdev
->sent_cmd
->data
;
4050 opcode
= __le16_to_cpu(sent
->opcode
);
4051 if (opcode
== HCI_OP_RESET
)
4054 skb
= skb_clone(hdev
->sent_cmd
, GFP_KERNEL
);
4058 skb_queue_head(&hdev
->cmd_q
, skb
);
4059 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
4062 void hci_req_cmd_complete(struct hci_dev
*hdev
, u16 opcode
, u8 status
,
4063 hci_req_complete_t
*req_complete
,
4064 hci_req_complete_skb_t
*req_complete_skb
)
4066 struct sk_buff
*skb
;
4067 unsigned long flags
;
4069 BT_DBG("opcode 0x%04x status 0x%02x", opcode
, status
);
4071 /* If the completed command doesn't match the last one that was
4072 * sent we need to do special handling of it.
4074 if (!hci_sent_cmd_data(hdev
, opcode
)) {
4075 /* Some CSR based controllers generate a spontaneous
4076 * reset complete event during init and any pending
4077 * command will never be completed. In such a case we
4078 * need to resend whatever was the last sent
4081 if (test_bit(HCI_INIT
, &hdev
->flags
) && opcode
== HCI_OP_RESET
)
4082 hci_resend_last(hdev
);
4087 /* If the command succeeded and there's still more commands in
4088 * this request the request is not yet complete.
4090 if (!status
&& !hci_req_is_complete(hdev
))
4093 /* If this was the last command in a request the complete
4094 * callback would be found in hdev->sent_cmd instead of the
4095 * command queue (hdev->cmd_q).
4097 if (bt_cb(hdev
->sent_cmd
)->hci
.req_flags
& HCI_REQ_SKB
) {
4098 *req_complete_skb
= bt_cb(hdev
->sent_cmd
)->hci
.req_complete_skb
;
4102 if (bt_cb(hdev
->sent_cmd
)->hci
.req_complete
) {
4103 *req_complete
= bt_cb(hdev
->sent_cmd
)->hci
.req_complete
;
4107 /* Remove all pending commands belonging to this request */
4108 spin_lock_irqsave(&hdev
->cmd_q
.lock
, flags
);
4109 while ((skb
= __skb_dequeue(&hdev
->cmd_q
))) {
4110 if (bt_cb(skb
)->hci
.req_flags
& HCI_REQ_START
) {
4111 __skb_queue_head(&hdev
->cmd_q
, skb
);
4115 if (bt_cb(skb
)->hci
.req_flags
& HCI_REQ_SKB
)
4116 *req_complete_skb
= bt_cb(skb
)->hci
.req_complete_skb
;
4118 *req_complete
= bt_cb(skb
)->hci
.req_complete
;
4121 spin_unlock_irqrestore(&hdev
->cmd_q
.lock
, flags
);
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (hci_skb_pkt_type(skb)) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (hci_skb_pkt_type(skb)) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}