/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)   mutex_lock(&d->req_lock)
#define hci_req_unlock(d) mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        kfree_skb(skb);

        hci_dev_change_flag(hdev, HCI_DUT_MODE);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open   = simple_open,
        .read   = dut_mode_read,
        .write  = dut_mode_write,
        .llseek = default_llseek,
};

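/* Usage note (illustrative, not part of the original file): once the
 * controller is up, Device Under Test mode can be toggled from user
 * space through this debugfs entry, typically at a path such as
 *
 *      echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * The exact debugfs mount point may differ per system.
 */
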
/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        struct sk_buff *skb;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;
        skb = hdev->req_skb;
        hdev->req_skb = NULL;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }

        if (!skb)
                return ERR_PTR(-ENODATA);

        return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

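/* Usage sketch (illustrative): a single HCI command can be issued
 * synchronously and its Command Complete parameters inspected:
 *
 *      struct sk_buff *skb;
 *
 *      skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *                           HCI_CMD_TIMEOUT);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *      ... parse skb->data ...
 *      kfree_skb(skb);
 */
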
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

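/* Usage sketch (illustrative): callers pair hci_req_sync() with a small
 * builder function that queues one or more commands on the request.
 * The builder name below is hypothetical:
 *
 *      static void my_req(struct hci_request *req, unsigned long opt)
 *      {
 *              hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *      }
 *
 *      err = hci_req_sync(hdev, my_req, 0, HCI_INIT_TIMEOUT);
 */
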
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Block size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void amp_init2(struct hci_request *req)
{
        /* Read Local Supported Features. Not all AMP controllers
         * support this so it's placed conditionally in the second
         * stage init.
         */
        if (req->hdev->commands[14] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init1(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs
         * (0x7d00 = 32000 slots, 32000 * 0.625 ms = 20 s)
         */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01; /* IO Capability Request */
                events[6] |= 0x02; /* IO Capability Response */
                events[6] |= 0x04; /* User Confirmation Request */
                events[6] |= 0x08; /* User Passkey Request */
                events[6] |= 0x10; /* Remote OOB Data Request */
                events[6] |= 0x20; /* Simple Pairing Complete */
                events[7] |= 0x04; /* User Passkey Notification */
                events[7] |= 0x08; /* Keypress Notification */
                events[7] |= 0x10; /* Remote Host Supported
                                    * Features Notification
                                    */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20; /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

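/* Note on the mask layout above (added for clarity): the Set Event Mask
 * parameter is a little-endian 64-bit bitfield, so mask bit B lands in
 * events[B / 8] as (1 << (B % 8)). For example, "Inquiry Result with
 * RSSI" is bit 33 in the Core Specification numbering, hence
 * events[4] |= 0x02 (33 / 8 = 4, 1 << (33 % 8) = 0x02).
 */
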
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->dev_type == HCI_AMP)
                return amp_init2(req);

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, then the host features page
                 * should also be available as well. However some
                 * controllers list the max_page as 0 as long as SSP
                 * has not been enabled. To achieve proper debugging
                 * output, force the minimum max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40; /* Triggered Clock Capture */
                events[1] |= 0x80; /* Synchronization Train Complete */
                events[2] |= 0x10; /* Slave Page Response Timeout */
                events[2] |= 0x20; /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01; /* Synchronization Train Received */
                events[2] |= 0x02; /* CSB Receive */
                events[2] |= 0x04; /* CSB Timeout */
                events[2] |= 0x08; /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        if (hdev->commands[6] & 0x20) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));
                events[0] = 0x0f;

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10; /* LE Long Term Key Request */

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20; /* LE Remote Connection
                                            * Parameter Request
                                            */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40; /* LE Data Length Change */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04; /* LE Direct Advertising
                                            * Report
                                            */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80; /* LE Read Local P-256
                                            * Public Key Complete
                                            */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01; /* LE Generate DHKey Complete */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                if (hdev->commands[25] & 0x40) {
                        /* Read LE Advertising Channel TX Power */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            bredr_sc_enabled(hdev)) {
                u8 support = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* The Device Under Test (DUT) mode is special and available for
         * all controller types. So just create it early on.
         */
        if (hci_dev_test_flag(hdev, HCI_SETUP)) {
                debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                                    &dut_mode_fops);
        }

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
         * BR/EDR/LE type controllers. AMP controllers only need the
         * first two stages of init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * It means that it is possible that a controller runs through its
         * setup phase and then discovers missing settings. If that is the
         * case, then this function will not be called. It then will only
         * be called during the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);

        return 0;
}

static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

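/* Usage note (illustrative): every reference taken here must be dropped
 * again with hci_dev_put() once the caller is done with the device:
 *
 *      struct hci_dev *hdev = hci_dev_get(index);
 *      if (!hdev)
 *              return -ENODEV;
 *      ...
 *      hci_dev_put(hdev);
 */
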
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

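/* State machine note (added for clarity): a typical discovery run moves
 * DISCOVERY_STOPPED -> DISCOVERY_STARTING -> DISCOVERY_FINDING and, when
 * remote names still need resolving, on to DISCOVERY_RESOLVING. When
 * discovery is stopped explicitly, it passes through DISCOVERY_STOPPING
 * back to DISCOVERY_STOPPED. Only the FINDING and RESOLVING states count
 * as "active" in hci_discovery_active() above.
 */
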
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

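/* Ordering note (added for clarity): the resolve list is kept sorted by
 * ascending abs(rssi), i.e. strongest signal first, so that name
 * resolution is attempted for the closest devices first. Entries whose
 * name request is already NAME_PENDING never trigger the break above,
 * so a re-inserted entry is placed after them.
 */
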
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_BREDR) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses we will use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate a temp buffer and
         * then copy it to the user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

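/* Userspace view (illustrative sketch, variable names hypothetical):
 * this function backs the HCIINQUIRY ioctl on an HCI socket, roughly
 *
 *      struct {
 *              struct hci_inquiry_req ir;
 *              struct inquiry_info info[255];
 *      } buf = { 0 };
 *
 *      buf.ir.dev_id  = 0;                     // hci0
 *      buf.ir.length  = 8;                     // in 1.28 s units
 *      buf.ir.num_rsp = 0;                     // unlimited, capped at 255
 *      memcpy(buf.ir.lap, "\x33\x8b\x9e", 3);  // General Inquiry Access Code
 *      ioctl(hci_sock_fd, HCIINQUIRY, &buf);
 */
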
static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
                ret = -ENODEV;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for valid public address or a configured static
                 * random address, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * In case of user channel usage, it is not important
                 * if a public address or static random address is
                 * available.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_BREDR &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hci_dev_test_flag(hdev, HCI_SETUP)) {
                if (hdev->setup)
                        ret = hdev->setup(hdev);

                /* The transport driver can set these quirks before
                 * creating the HCI device or in its setup callback.
                 *
                 * In case any of them is set, the controller has to
                 * start up as unconfigured.
                 */
                if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
                    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
                        hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

                /* For an unconfigured controller it is required to
                 * read at least the version information provided by
                 * the Read Local Version Information command.
                 *
                 * If the set_bdaddr driver callback is provided, then
                 * also the original Bluetooth public device address
                 * will be read using the Read BD Address command.
                 */
                if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        ret = __hci_unconf_init(hdev);
        }

        if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* If public address change is configured, ensure that
                 * the address gets programmed. If the driver does not
                 * support changing the public address, fail the power
                 * on procedure.
                 */
                if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
                    hdev->set_bdaddr)
                        ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
                else
                        ret = -EADDRNOTAVAIL;
        }

        if (!ret) {
                if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
                        ret = __hci_init(hdev);
        }

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
                    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
                    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_BREDR) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags &= BIT(HCI_RAW);
        }

done:
        hci_req_unlock(hdev);
        return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        /* Devices that are marked as unconfigured can only be powered
         * up as user channel. Trying to bring them up as normal devices
         * will result in a failure. Only user channel operation is
         * possible.
         *
         * When this function is called for a user channel, the flag
         * HCI_USER_CHANNEL will be set first before attempting to
         * open the device.
         */
        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
            !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        /* We need to ensure that no other power on/off work is pending
         * before proceeding to call hci_dev_do_open. This is
         * particularly important if the setup procedure has not yet
         * completed.
         */
        if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
                cancel_delayed_work(&hdev->power_off);

        /* After this call it is guaranteed that the setup procedure
         * has finished. This means that error conditions like RFKILL
         * or no valid public or static random address apply.
         */
        flush_workqueue(hdev->req_workqueue);

        /* For controllers not using the management interface and that
         * are brought up using legacy ioctl, set the HCI_BONDABLE bit
         * so that pairing works for them. Once the management interface
         * is in use this bit will be cleared again and userspace has
         * to explicitly enable it.
         */
        if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
            !hci_dev_test_flag(hdev, HCI_MGMT))
                hci_dev_set_flag(hdev, HCI_BONDABLE);

        err = hci_dev_do_open(hdev);

done:
        hci_dev_put(hdev);
        return err;
}

/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
        struct hci_conn_params *p;

        list_for_each_entry(p, &hdev->le_conn_params, list) {
                if (p->conn) {
                        hci_conn_drop(p->conn);
                        hci_conn_put(p->conn);
                        p->conn = NULL;
                }
                list_del_init(&p->action);
        }

        BT_DBG("All LE pending actions cleared");
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
            !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
            test_bit(HCI_UP, &hdev->flags)) {
                /* Execute vendor specific shutdown routine */
                if (hdev->shutdown)
                        hdev->shutdown(hdev);
        }

        cancel_delayed_work(&hdev->power_off);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                cancel_delayed_work_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }

        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);

        if (hdev->discov_timeout > 0) {
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = 0;
                hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
                hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
        }

        if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
                cancel_delayed_work(&hdev->service_cache);

        cancel_delayed_work_sync(&hdev->le_scan_disable);
        cancel_delayed_work_sync(&hdev->le_scan_restart);

        if (hci_dev_test_flag(hdev, HCI_MGMT))
                cancel_delayed_work_sync(&hdev->rpa_expired);

        if (hdev->adv_instance_timeout) {
                cancel_delayed_work_sync(&hdev->adv_instance_expire);
                hdev->adv_instance_timeout = 0;
        }

        /* Avoid potential lockdep warnings from the *_flush() calls by
         * ensuring the workqueue is empty up front.
         */
        drain_workqueue(hdev->workqueue);

        hci_dev_lock(hdev);

        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

        if (!hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
                if (hdev->dev_type == HCI_BREDR)
                        mgmt_powered(hdev, 0);
        }

        hci_inquiry_cache_flush(hdev);
        hci_pend_le_actions_clear(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        smp_unregister(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
            !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
            test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* flush cmd work */
        flush_work(&hdev->cmd_work);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                cancel_delayed_work_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        /* Clear flags */
        hdev->flags &= BIT(HCI_RAW);
        hci_dev_clear_volatile_flags(hdev);

        /* Controller radio is available but is currently powered down */
        hdev->amp_status = AMP_STATUS_POWERED_DOWN;

        memset(hdev->eir, 0, sizeof(hdev->eir));
        memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
        bacpy(&hdev->random_addr, BDADDR_ANY);

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
                cancel_delayed_work(&hdev->power_off);

        err = hci_dev_do_close(hdev);

done:
        hci_dev_put(hdev);
        return err;
}

static int hci_dev_do_reset(struct hci_dev *hdev)
{
        int ret;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        /* Avoid potential lockdep warnings from the *_flush() calls by
         * ensuring the workqueue is empty up front.
         */
        drain_workqueue(hdev->workqueue);

        hci_dev_lock(hdev);
        hci_inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

        hci_req_unlock(hdev);
        return ret;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (!test_bit(HCI_UP, &hdev->flags)) {
                err = -ENETDOWN;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        err = hci_dev_do_reset(hdev);

done:
        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                ret = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                ret = -EOPNOTSUPP;
                goto done;
        }

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
        hci_dev_put(hdev);
        return ret;
}

123abc08
JH
1787static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1788{
bc6d2d04 1789 bool conn_changed, discov_changed;
123abc08
JH
1790
1791 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1792
1793 if ((scan & SCAN_PAGE))
238be788
MH
1794 conn_changed = !hci_dev_test_and_set_flag(hdev,
1795 HCI_CONNECTABLE);
123abc08 1796 else
a69d8927
MH
1797 conn_changed = hci_dev_test_and_clear_flag(hdev,
1798 HCI_CONNECTABLE);
123abc08 1799
bc6d2d04 1800 if ((scan & SCAN_INQUIRY)) {
238be788
MH
1801 discov_changed = !hci_dev_test_and_set_flag(hdev,
1802 HCI_DISCOVERABLE);
bc6d2d04 1803 } else {
a358dc11 1804 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
a69d8927
MH
1805 discov_changed = hci_dev_test_and_clear_flag(hdev,
1806 HCI_DISCOVERABLE);
bc6d2d04
JH
1807 }
1808
d7a5a11d 1809 if (!hci_dev_test_flag(hdev, HCI_MGMT))
123abc08
JH
1810 return;
1811
bc6d2d04
JH
1812 if (conn_changed || discov_changed) {
1813 /* In case this was disabled through mgmt */
a1536da2 1814 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
bc6d2d04 1815
d7a5a11d 1816 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
bc6d2d04
JH
1817 mgmt_update_adv_data(hdev);
1818
123abc08 1819 mgmt_new_settings(hdev);
bc6d2d04 1820 }
123abc08
JH
1821}
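
/* Illustrative note (not part of the original file): the scan value mirrors
 * the HCI Write Scan Enable encoding (SCAN_DISABLED 0x00, SCAN_INQUIRY 0x01,
 * SCAN_PAGE 0x02), so the flag updates above behave as follows:
 *
 *	hci_update_scan_state(hdev, SCAN_DISABLED);             neither flag set
 *	hci_update_scan_state(hdev, SCAN_PAGE);                 HCI_CONNECTABLE only
 *	hci_update_scan_state(hdev, SCAN_PAGE | SCAN_INQUIRY);  both flags set
 */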

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
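
/* Illustrative sketch (not part of the original file): for HCISETACLMTU and
 * HCISETSCOMTU, dev_opt carries two 16-bit values, which the pointer
 * arithmetic above unpacks as packet count in the low half and MTU in the
 * high half. On a little-endian machine a hypothetical userspace caller
 * would pack it like this:
 *
 *	struct hci_dev_req dr = { .dev_id = 0 };
 *
 *	dr.dev_opt = (acl_mtu << 16) | acl_pkts;
 *	ioctl(hci_sock, HCISETACLMTU, &dr);
 */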

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (blocked) {
		hci_dev_set_flag(hdev, HCI_RFKILLED);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG))
			hci_dev_do_close(hdev);
	} else {
		hci_dev_clear_flag(hdev, HCI_RFKILLED);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send the Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		BT_ERR("%s hardware error 0x%2.2x", hdev->name,
		       hdev->hw_error_code);

	if (hci_dev_do_close(hdev))
		return;

	hci_dev_do_open(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

static void hci_adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, adv_instance_expire.work);

	BT_DBG("%s", hdev->name);

	mgmt_adv_timeout_expired(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry_rcu(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
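
/* Illustrative note (not part of the original file): auth_type and
 * remote_auth follow the HCI authentication requirements encoding, which is
 * what the numeric comparisons above rely on:
 *
 *	0x00 No Bonding			0x01 No Bonding + MITM
 *	0x02 Dedicated Bonding		0x03 Dedicated Bonding + MITM
 *	0x04 General Bonding		0x05 General Bonding + MITM
 */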

static u8 ltk_role(u8 type)
{
	if (type == SMP_LTK)
		return HCI_ROLE_MASTER;

	return HCI_ROLE_SLAVE;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			rcu_read_unlock();
			return irk;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}
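
/* Illustrative note (not part of the original file): bdaddr_t is stored
 * little-endian, so b[5] is the most significant address byte. A static
 * random address must have its two top bits set to 0b11, which is what the
 * (b[5] & 0xc0) != 0xc0 test above rejects: an address printed as
 * C3:xx:xx:xx:xx:xx passes, while a resolvable private address such as
 * 4F:xx:xx:xx:xx:xx does not qualify as an identity address.
 */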

struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del_rcu(&key->list);
	kfree_rcu(key, rcu);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k;
	int removed = 0;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct smp_ltk *k;
	struct smp_irk *irk;
	u8 addr_type;

	if (type == BDADDR_BREDR) {
		if (hci_find_link_key(hdev, bdaddr))
			return true;
		return false;
	}

	/* Convert to HCI addr type which struct smp_ltk uses */
	if (type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}
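
/* Illustrative sketch (not part of the original file): a hypothetical caller
 * checking whether an LE peer seen under a resolvable private address is
 * already paired; the hci_get_irk() lookup above maps the RPA back to the
 * identity address the LTK is stored against:
 *
 *	if (hci_bdaddr_is_paired(hdev, &peer_addr, BDADDR_LE_RANDOM))
 *		return -EALREADY;
 */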

/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list) {
		if (bacmp(bdaddr, &data->bdaddr) != 0)
			continue;
		if (data->bdaddr_type != bdaddr_type)
			continue;
		return data;
	}

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 bdaddr_type)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
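
/* Illustrative note (not part of the original file): after the two blocks
 * above, data->present encodes which OOB values the remote provided:
 *
 *	0x00 none	0x01 P-192 only		0x02 P-256 only		0x03 both
 */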

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		if (adv_instance->instance == instance)
			return adv_instance;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *cur_instance;

	cur_instance = hci_find_adv_instance(hdev, instance);
	if (!cur_instance)
		return NULL;

	if (cur_instance == list_last_entry(&hdev->adv_instances,
					    struct adv_info, list))
		return list_first_entry(&hdev->adv_instances,
					struct adv_info, list);
	else
		return list_next_entry(cur_instance, list);
}

/* This function requires the caller holds hdev->lock */
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	BT_DBG("%s removing instance %d", hdev->name, instance);

	if (hdev->cur_adv_instance == instance && hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_del(&adv_instance->list);
	kfree(adv_instance);

	hdev->adv_instance_cnt--;

	return 0;
}

/* This function requires the caller holds hdev->lock */
void hci_adv_instances_clear(struct hci_dev *hdev)
{
	struct adv_info *adv_instance, *n;

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		list_del(&adv_instance->list);
		kfree(adv_instance);
	}

	hdev->adv_instance_cnt = 0;
}

/* This function requires the caller holds hdev->lock */
int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
			 u16 adv_data_len, u8 *adv_data,
			 u16 scan_rsp_len, u8 *scan_rsp_data,
			 u16 timeout, u16 duration)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (adv_instance) {
		memset(adv_instance->adv_data, 0,
		       sizeof(adv_instance->adv_data));
		memset(adv_instance->scan_rsp_data, 0,
		       sizeof(adv_instance->scan_rsp_data));
	} else {
		if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
		    instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
			return -EOVERFLOW;

		adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
		if (!adv_instance)
			return -ENOMEM;

		adv_instance->pending = true;
		adv_instance->instance = instance;
		list_add(&adv_instance->list, &hdev->adv_instances);
		hdev->adv_instance_cnt++;
	}

	adv_instance->flags = flags;
	adv_instance->adv_data_len = adv_data_len;
	adv_instance->scan_rsp_len = scan_rsp_len;

	if (adv_data_len)
		memcpy(adv_instance->adv_data, adv_data, adv_data_len);

	if (scan_rsp_len)
		memcpy(adv_instance->scan_rsp_data,
		       scan_rsp_data, scan_rsp_len);

	adv_instance->timeout = timeout;
	adv_instance->remaining_time = timeout;

	if (duration == 0)
		adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
	else
		adv_instance->duration = duration;

	BT_DBG("%s for instance %d", hdev->name, instance);

	return 0;
}
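
/* Illustrative sketch (not part of the original file): a hypothetical caller
 * registering instance 1 with a three-byte Flags AD element, no scan
 * response, no overall timeout and the default duration. hdev->lock must be
 * held, as required by the helper above:
 *
 *	u8 ad[] = { 0x02, 0x01, 0x06 };
 *
 *	hci_dev_lock(hdev);
 *	err = hci_add_adv_instance(hdev, 0x01, 0, sizeof(ad), ad,
 *				   0, NULL, 0, 0);
 *	hci_dev_unlock(hdev);
 */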

struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
					   bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, bdaddr_list) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}
}

int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, list);

	return 0;
}

int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}
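
/* Illustrative sketch (not part of the original file): these helpers back
 * hdev->blacklist, hdev->whitelist and hdev->le_white_list alike, and
 * passing BDADDR_ANY to the del helper doubles as "clear the whole list":
 *
 *	err = hci_bdaddr_list_add(&hdev->whitelist, &bdaddr, BDADDR_BREDR);
 *	...
 *	hci_bdaddr_list_del(&hdev->whitelist, BDADDR_ANY, 0);
 */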

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	/* The conn params list only contains identity addresses */
	if (!hci_is_identity_address(addr, addr_type))
		return NULL;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
			return params;
		}
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
						  bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *param;

	/* The list only contains identity addresses */
	if (!hci_is_identity_address(addr, addr_type))
		return NULL;

	list_for_each_entry(param, list, action) {
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type)
			return param;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	if (!hci_is_identity_address(addr, addr_type))
		return NULL;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	INIT_LIST_HEAD(&params->action);

	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}

static void hci_conn_params_free(struct hci_conn_params *params)
{
	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
	}

	list_del(&params->action);
	list_del(&params->list);
	kfree(params);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_conn_params_free(params);

	hci_update_background_scan(hdev);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;
		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_all(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
		hci_conn_params_free(params);

	hci_update_background_scan(hdev);

	BT_DBG("All LE connection parameters were removed");
}

static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
					  u16 opcode)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	hdev->discovery.scan_start = 0;

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_dev_lock(hdev);

		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			/* If we were running an LE only scan, change the
			 * discovery state. If we were running both LE and
			 * BR/EDR inquiry simultaneously, and BR/EDR inquiry
			 * is already finished, stop discovery, otherwise
			 * BR/EDR inquiry will stop discovery when finished.
			 * If we are resolving a remote device name, do not
			 * change the discovery state.
			 */
			if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
			    hdev->discovery.state != DISCOVERY_RESOLVING)
				hci_discovery_set_state(hdev,
							DISCOVERY_STOPPED);
		} else {
			struct hci_request req;

			hci_inquiry_cache_flush(hdev);

			hci_req_init(&req, hdev);

			memset(&cp, 0, sizeof(cp));
			memcpy(&cp.lap, lap, sizeof(cp.lap));
			cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
			hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

			err = hci_req_run(&req, inquiry_complete);
			if (err) {
				BT_ERR("Inquiry request failed: err %d", err);
				hci_discovery_set_state(hdev,
							DISCOVERY_STOPPED);
			}
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	cancel_delayed_work_sync(&hdev->le_scan_restart);

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}

static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
					  u16 opcode)
{
	unsigned long timeout, duration, scan_start, now;

	BT_DBG("%s", hdev->name);

	if (status) {
		BT_ERR("Failed to restart LE scan: status %d", status);
		return;
	}

	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
	    !hdev->discovery.scan_start)
		return;

	/* When the scan was started, hdev->le_scan_disable has been queued
	 * after duration from scan_start. During scan restart this job
	 * has been canceled, and we need to queue it again after proper
	 * timeout, to make sure that scan does not run indefinitely.
	 */
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
	if (now - scan_start <= duration) {
		int elapsed;

		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		timeout = 0;
	}
	queue_delayed_work(hdev->workqueue,
			   &hdev->le_scan_disable, timeout);
}
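
/* Illustrative note (not part of the original file): with a hypothetical
 * HZ=1000, a scan_duration of 10240 jiffies and a restart completing 4000
 * jiffies after scan_start, elapsed = 4000 and the disable work is re-queued
 * with timeout = 10240 - 4000 = 6240 jiffies, so the total scan time stays
 * bounded by the original duration. The else branch handles jiffies
 * wrap-around between scan_start and now.
 */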

static void le_scan_restart_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	struct hci_request req;
	struct hci_cp_le_set_scan_enable cp;
	int err;

	BT_DBG("%s", hdev->name);

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_ENABLE;
	cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_restart_work_complete);
	if (err)
		BT_ERR("Restart LE scan request failed: err %d", err);
}

/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 *
 * In case BR/EDR has been disabled on a dual-mode controller and
 * userspace has configured a static address, then that address
 * becomes the identity address instead of the public BR/EDR address.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}
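
/* Illustrative sketch (not part of the original file): a hypothetical caller
 * that needs to report the controller's identity address, e.g. when filling
 * in a reply to a management command:
 *
 *	bdaddr_t id_addr;
 *	u8 id_addr_type;
 *
 *	hci_copy_identity_address(hdev, &id_addr, &id_addr_type);
 *
 * where id_addr_type ends up as either ADDR_LE_DEV_PUBLIC or
 * ADDR_LE_DEV_RANDOM depending on the branches above.
 */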

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
	hdev->adv_instance_timeout = 0;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);
	INIT_LIST_HEAD(&hdev->adv_instances);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, hci_adv_timeout_expire);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
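
/* Illustrative sketch (not part of the original file): the usual driver-side
 * pattern built on these helpers, with hypothetical my_open/my_close/my_send
 * callbacks:
 *
 *	hdev = hci_alloc_dev();
 *	hdev->bus = HCI_USB;
 *	hdev->open = my_open;
 *	hdev->close = my_close;
 *	hdev->send = my_send;
 *	hci_set_drvdata(hdev, priv);
 *	if (hci_register_dev(hdev) < 0)
 *		hci_free_dev(hdev);
 */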

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		hci_dev_set_flag(hdev, HCI_RFKILLED);

	hci_dev_set_flag(hdev, HCI_SETUP);
	hci_dev_set_flag(hdev, HCI_AUTO_OFF);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hci_dev_set_flag(hdev, HCI_UNREGISTER);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_instances_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Reset HCI device */
int hci_reset_dev(struct hci_dev *hdev)
{
	const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
	struct sk_buff *skb;

	skb = bt_skb_alloc(3, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	memcpy(skb_put(skb, 3), hw_err, 3);

	/* Send Hardware Error to upper stack */
	return hci_recv_frame(hdev, skb);
}
EXPORT_SYMBOL(hci_reset_dev);
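
/* Illustrative note (not part of the original file): the injected hw_err
 * buffer is a complete HCI event packet: event code HCI_EV_HARDWARE_ERROR,
 * parameter length 0x01, and hardware error code 0x00. Feeding it through
 * hci_recv_frame() makes the event handler run the same recovery path as a
 * real controller-generated hardware error.
 */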

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_add_tail(&cb->list, &hci_cb_list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	err = hdev->send(hdev, skb);
	if (err < 0) {
		BT_ERR("%s sending frame failed (%d)", hdev->name, err);
		kfree_skb(skb);
	}
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
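
/* Illustrative sketch (not part of the original file): queueing a plain
 * HCI_Reset, which takes no parameters:
 *
 *	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 */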

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
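
/* Illustrative note (not part of the original file): hci_handle_pack()
 * combines the 12-bit connection handle with the packet boundary and
 * broadcast flags in the top four bits, roughly
 * (handle & 0x0fff) | (flags << 12), matching the ACL data packet header
 * layout in the Bluetooth core specification.
 */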
3572
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list;
			list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

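/* Illustrative sketch (not part of the original file): a hypothetical
 * caller handing one L2CAP-style PDU to the ACL scheduler. bt_skb_alloc()
 * is the allocator from <net/bluetooth/bluetooth.h>; ACL_START marks the
 * first fragment of a PDU.
 */
static void __maybe_unused example_send_acl(struct hci_chan *chan,
					    const void *data, size_t len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_KERNEL);
	if (!skb)
		return;

	memcpy(skb_put(skb, len), data, len);

	hci_send_acl(chan, skb, ACL_START);
}
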
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else {
		*quote = 0;
	}

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

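/* Worked example (illustrative): with three SCO connections holding
 * queued data and hdev->sco_cnt == 8, the connection with the smallest
 * ->sent count is chosen and its quote is 8 / 3 == 2 packets. When
 * cnt / num rounds down to zero the quote is clamped to 1, so a link can
 * always make some progress.
 */
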
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

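/* Worked example (illustrative): if three ACL channels have head-of-queue
 * priorities 5, 7 and 7, the priority-5 channel loses out for this round;
 * num counts only the two priority-7 channels, the one whose connection
 * has the smaller ->sent count wins, and its quote becomes
 * hdev->acl_cnt / 2 (minimum 1).
 */
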
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

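/* Worked example (illustrative): with hdev->block_len == 256, a 1021-byte
 * skb carries 1021 - HCI_ACL_HDR_SIZE == 1017 bytes past the 4-byte ACL
 * header, so DIV_ROUND_UP(1017, 256) charges 4 controller buffer blocks.
 */
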
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

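/* Worked example (illustrative): the largest link supervision timeout the
 * spec allows is 0xffff slots * 0.625 ms ~= 40.9 s, and HCI_ACL_TX_TIMEOUT
 * is defined in <net/bluetooth/hci.h> as 45 seconds, so a stalled link is
 * always given the full supervision window before being killed.
 */
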
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->req.complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->req.complete;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->req.complete_skb) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->req.complete_skb;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		*req_complete = bt_cb(skb)->req.complete;
		*req_complete_skb = bt_cb(skb)->req.complete_skb;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}

static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

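/* Illustrative sketch (not part of the original file): the producer side
 * of hdev->rx_q. A hypothetical transport driver tags the packet type and
 * calls hci_recv_frame(), which queues the skb and schedules hci_rx_work()
 * above.
 */
static int __maybe_unused example_driver_receive(struct hci_dev *hdev,
						 const void *buf, size_t len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), buf, len);
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;

	return hci_recv_frame(hdev, skb);
}
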
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}