Bluetooth: Add second hci_request callback option for full skb
[deliverable/linux.git] / net / bluetooth / hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	hci_dev_change_flag(hdev, HCI_DUT_MODE);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

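/* Usage sketch (illustrative, not part of the original file): the dut_mode
 * attribute is exercised from userspace through debugfs, assuming debugfs
 * is mounted at /sys/kernel/debug and the controller is hci0:
 *
 *   # cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *   N
 *   # echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * Writing 'Y' issues HCI_OP_ENABLE_DUT_MODE synchronously; writing 'N'
 * resets the controller, which is how Device Under Test mode is left again.
 */
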
/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

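/* Illustrative sketch (not in the original file): a driver-style caller of
 * __hci_cmd_sync(). It sends Read Local Version synchronously and parses
 * the returned Command Complete parameters, assuming struct
 * hci_rp_read_local_version from hci.h describes the payload.
 */
#if 0
static int example_read_local_version(struct hci_dev *hdev)
{
	struct hci_rp_read_local_version *rp;
	struct sk_buff *skb;

	/* Blocks until the Command Complete event arrives or times out */
	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
			     HCI_CMD_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	if (skb->len < sizeof(*rp)) {
		kfree_skb(skb);
		return -EIO;
	}

	rp = (void *) skb->data;
	BT_DBG("%s hci_ver 0x%2.2x manufacturer %u", hdev->name,
	       rp->hci_ver, __le16_to_cpu(rp->manufacturer));

	kfree_skb(skb);
	return 0;
}
#endif
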
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

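/* Illustrative sketch (not in the original file): the builder/runner split
 * used throughout this file. A callback queues one or more commands on the
 * hci_request, and hci_req_sync() runs them as one serialized request. The
 * callback and wrapper names here are hypothetical.
 */
#if 0
static void example_read_name_req(struct hci_request *req, unsigned long opt)
{
	/* Queue a single Read Local Name command; further hci_req_add()
	 * calls would turn this into a multi-command transaction.
	 */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
}

static int example_run(struct hci_dev *hdev)
{
	return hci_req_sync(hdev, example_read_name_req, 0, HCI_CMD_TIMEOUT);
}
#endif
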
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void amp_init2(struct hci_request *req)
{
	/* Read Local Supported Features. Not all AMP controllers
	 * support this so it's placed conditionally in the second
	 * stage init.
	 */
	if (req->hdev->commands[14] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init1(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

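/* Illustrative note (not in the original file): the eight-byte event mask is
 * a little-endian bitmap, so "events[n] |= val" with val == BIT(m) enables
 * mask bit 8 * n + m. A hypothetical helper making that explicit:
 */
#if 0
static void example_set_event_mask_bit(u8 *events, unsigned int bit)
{
	/* e.g. bit 61 -> events[7] |= 0x20 (LE Meta-Event above) */
	events[bit / 8] |= 1 << (bit % 8);
}
#endif
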
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->dev_type == HCI_AMP)
		return amp_init2(req);

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available. However some controllers
		 * list the max_page as 0 as long as SSP has not been
		 * enabled. To achieve proper debugging output, force
		 * the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	if (hdev->commands[6] & 0x20) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40;	/* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04;	/* LE Direct Advertising
						 * Report
						 */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80;	/* LE Read Local P-256
						 * Public Key Complete
						 */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01;	/* LE Generate DHKey Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

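/* Illustrative sketch (not in the original file): the hdev->commands[] checks
 * above index the controller's Supported Commands bitmask by octet and bit,
 * so e.g. "hdev->commands[34] & 0x04" tests octet 34, bit 2. A hypothetical
 * helper spelling that out:
 */
#if 0
static bool example_hci_cmd_supported(struct hci_dev *hdev,
				      unsigned int octet, unsigned int bit)
{
	return hdev->commands[octet] & (1 << bit);
}
#endif
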
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first two stages of init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}

static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}

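/* Illustrative note (not in the original file): the usual life cycle of the
 * discovery state, as driven by mgmt and the event handlers, is assumed to be
 *
 *   DISCOVERY_STOPPED -> DISCOVERY_STARTING -> DISCOVERY_FINDING
 *       -> (DISCOVERY_RESOLVING) -> DISCOVERY_STOPPING -> DISCOVERY_STOPPED
 *
 * Only FINDING and RESOLVING count as "active" in hci_discovery_active(),
 * and userspace is notified via mgmt_discovering() on the edges into
 * FINDING and back to STOPPED.
 */
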
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}

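/* Illustrative sketch (not in the original file): how an inquiry-result
 * handler is expected to feed this cache. The handler name and the RSSI
 * value are hypothetical; the returned flags tell the mgmt device-found
 * path whether name resolution is still needed.
 */
#if 0
static void example_inquiry_result(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_data data;
	u32 flags;

	memset(&data, 0, sizeof(data));
	bacpy(&data.bdaddr, bdaddr);
	data.rssi = -60;	/* hypothetical value from the event */

	hci_dev_lock(hdev);
	flags = hci_inquiry_cache_update(hdev, &data, false);
	hci_dev_unlock(hdev);

	if (flags & MGMT_DEV_FOUND_CONFIRM_NAME)
		BT_DBG("%s name resolution still needed", hdev->name);
}
#endif
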
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

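/* Illustrative sketch (not in the original file): a minimal userspace caller
 * of this ioctl path, assuming the HCIINQUIRY ioctl number and the GIAC LAP
 * 0x9e8b33 from the usual hci.h definitions. Error handling is elided.
 */
#if 0
	/* userspace, not kernel code */
	struct {
		struct hci_inquiry_req ir;
		struct inquiry_info info[255];
	} buf = { 0 };

	buf.ir.dev_id  = 0;			/* hci0 */
	buf.ir.flags   = IREQ_CACHE_FLUSH;	/* drop stale cache entries */
	buf.ir.lap[0]  = 0x33;			/* General Inquiry Access Code */
	buf.ir.lap[1]  = 0x8b;
	buf.ir.lap[2]  = 0x9e;
	buf.ir.length  = 8;			/* inquiry length, 1.28s units */
	buf.ir.num_rsp = 255;

	ioctl(hci_socket, HCIINQUIRY, &buf);
	/* buf.ir.num_rsp now holds the number of inquiry_info entries */
#endif
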
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		ret = -ENODEV;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for a valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			ret = __hci_unconf_init(hdev);
	}

	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

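/* Illustrative sketch (not in the original file): how a transport driver is
 * assumed to interact with the setup stage above. The driver callbacks and
 * the quirk choice here are hypothetical; the point is that quirks set
 * before or during setup() decide whether the controller comes up as
 * unconfigured.
 */
#if 0
static int example_driver_setup(struct hci_dev *hdev)
{
	/* Controller ships with a default/invalid BD_ADDR: force the
	 * unconfigured path so userspace must program an address via
	 * the set_bdaddr callback.
	 */
	set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
	return 0;
}

static void example_driver_register(struct hci_dev *hdev)
{
	hdev->setup = example_driver_setup;
	hdev->set_bdaddr = example_driver_set_bdaddr;	/* hypothetical */
}
#endif
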
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->conn) {
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
		list_del_init(&p->action);
	}

	BT_DBG("All LE pending actions cleared");
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			hdev->shutdown(hdev);
	}

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		if (hdev->dev_type == HCI_BREDR)
			mgmt_powered(hdev, 0);
	}

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	smp_unregister(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
	    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

	hci_req_unlock(hdev);
	return ret;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	err = hci_dev_do_reset(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

1da177e4
LT
1806int hci_dev_reset_stat(__u16 dev)
1807{
1808 struct hci_dev *hdev;
1809 int ret = 0;
1810
70f23020
AE
1811 hdev = hci_dev_get(dev);
1812 if (!hdev)
1da177e4
LT
1813 return -ENODEV;
1814
d7a5a11d 1815 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
0736cfa8
MH
1816 ret = -EBUSY;
1817 goto done;
1818 }
1819
d7a5a11d 1820 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
fee746b0
MH
1821 ret = -EOPNOTSUPP;
1822 goto done;
1823 }
1824
1da177e4
LT
1825 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1826
0736cfa8 1827done:
1da177e4 1828 hci_dev_put(hdev);
1da177e4
LT
1829 return ret;
1830}
1831
static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			mgmt_update_adv_data(hdev);

		mgmt_new_settings(hdev);
	}
}
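
/* Illustrative sketch (not part of the original file): the scan parameter
 * above carries the standard HCI scan-enable bits, SCAN_PAGE and
 * SCAN_INQUIRY, so a legacy "both connectable and discoverable" request
 * maps to both bits being set:
 */
#if 0 /* example only */
	u8 scan = SCAN_PAGE | SCAN_INQUIRY;	/* connectable + discoverable */

	hci_update_scan_state(hdev, scan);	/* sets both mgmt flags */
#endif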

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
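
/* Illustrative sketch (not part of the original file): for HCISETACLMTU
 * and HCISETSCOMTU the 32-bit dev_opt is read as two native-endian 16-bit
 * halves, packet count in the low half and MTU in the high half. A
 * hypothetical userspace caller on a little-endian host (matching the
 * pointer arithmetic above) would pack it like this:
 */
#if 0 /* example only */
	struct hci_dev_req dr;

	dr.dev_id  = 0;					/* hci0 */
	dr.dev_opt = ((__u32) 1021 << 16) | 8;		/* MTU 1021, 8 packets */
	ioctl(hci_sock_fd, HCISETACLMTU, &dr);		/* hci_sock_fd: hypothetical raw HCI socket */
#endif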

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (blocked) {
		hci_dev_set_flag(hdev, HCI_RFKILLED);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG))
			hci_dev_do_close(hdev);
	} else {
		hci_dev_clear_flag(hdev, HCI_RFKILLED);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send an Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* Once the controller is configured, it is important
		 * to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		BT_ERR("%s hardware error 0x%2.2x", hdev->name,
		       hdev->hw_error_code);

	if (hci_dev_do_close(hdev))
		return;

	hci_dev_do_open(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry_rcu(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither the local nor the remote side requested no-bonding */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
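
/* Illustrative sketch (not part of the original file): a legacy
 * combination key (type 0x00, i.e. below 0x03) is always stored, while a
 * debug combination key is never stored, regardless of the connection's
 * authentication requirements:
 */
#if 0 /* example only */
	hci_persistent_key(hdev, conn, HCI_LK_COMBINATION, 0xff);	/* true */
	hci_persistent_key(hdev, conn, HCI_LK_DEBUG_COMBINATION, 0xff);	/* false */
#endif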

static u8 ltk_role(u8 type)
{
	if (type == SMP_LTK)
		return HCI_ROLE_MASTER;

	return HCI_ROLE_SLAVE;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			rcu_read_unlock();
			return irk;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del_rcu(&key->list);
	kfree_rcu(key, rcu);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k;
	int removed = 0;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct smp_ltk *k;
	struct smp_irk *irk;
	u8 addr_type;

	if (type == BDADDR_BREDR) {
		if (hci_find_link_key(hdev, bdaddr))
			return true;
		return false;
	}

	/* Convert to HCI addr type which struct smp_ltk uses */
	if (type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}

/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list) {
		if (bacmp(bdaddr, &data->bdaddr) != 0)
			continue;
		if (data->bdaddr_type != bdaddr_type)
			continue;
		return data;
	}

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 bdaddr_type)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
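
/* Illustrative summary (not part of the original file): after the update
 * above, data->present encodes which OOB value pairs are valid:
 *
 *   0x00 - neither P-192 nor P-256 values present
 *   0x01 - P-192 hash/randomizer only
 *   0x02 - P-256 hash/randomizer only
 *   0x03 - both P-192 and P-256 values present
 *
 * A hypothetical helper that tests for P-256 data could look like this:
 */
#if 0 /* example only */
static inline bool oob_data_has_p256(const struct oob_data *data)
{
	return data->present & 0x02;	/* set for 0x02 and 0x03 */
}
#endif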

struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
					   bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, bdaddr_list) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}
}

int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, list);

	return 0;
}

int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}
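
/* Illustrative sketch (not part of the original file): these helpers back
 * the blacklist, whitelist and LE white list on hdev. Note the BDADDR_ANY
 * convention: adding it is rejected with -EBADF, while deleting it clears
 * the whole list. A caller holding hdev->lock might use them like this
 * (the address bytes below are made up):
 */
#if 0 /* example only */
	bdaddr_t peer = {{ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 }};

	hci_bdaddr_list_add(&hdev->blacklist, &peer, BDADDR_BREDR);
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &peer, BDADDR_BREDR))
		hci_bdaddr_list_del(&hdev->blacklist, &peer, BDADDR_BREDR);
	hci_bdaddr_list_del(&hdev->blacklist, BDADDR_ANY, 0);	/* clears all */
#endif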

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	/* The conn params list only contains identity addresses */
	if (!hci_is_identity_address(addr, addr_type))
		return NULL;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
			return params;
		}
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
						  bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *param;

	/* The list only contains identity addresses */
	if (!hci_is_identity_address(addr, addr_type))
		return NULL;

	list_for_each_entry(param, list, action) {
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type)
			return param;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	if (!hci_is_identity_address(addr, addr_type))
		return NULL;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	INIT_LIST_HEAD(&params->action);

	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}

static void hci_conn_params_free(struct hci_conn_params *params)
{
	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
	}

	list_del(&params->action);
	list_del(&params->list);
	kfree(params);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_conn_params_free(params);

	hci_update_background_scan(hdev);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;
		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_all(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
		hci_conn_params_free(params);

	hci_update_background_scan(hdev);

	BT_DBG("All LE connection parameters were removed");
}

static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
					  u16 opcode)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	hdev->discovery.scan_start = 0;

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_dev_lock(hdev);

		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			/* If we were running an LE only scan, change the
			 * discovery state. If we were running both LE and
			 * BR/EDR inquiry simultaneously, and BR/EDR inquiry
			 * is already finished, stop discovery; otherwise
			 * BR/EDR inquiry will stop discovery when finished.
			 */
			if (!test_bit(HCI_INQUIRY, &hdev->flags))
				hci_discovery_set_state(hdev,
							DISCOVERY_STOPPED);
		} else {
			struct hci_request req;

			hci_inquiry_cache_flush(hdev);

			hci_req_init(&req, hdev);

			memset(&cp, 0, sizeof(cp));
			memcpy(&cp.lap, lap, sizeof(cp.lap));
			cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
			hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

			err = hci_req_run(&req, inquiry_complete);
			if (err) {
				BT_ERR("Inquiry request failed: err %d", err);
				hci_discovery_set_state(hdev,
							DISCOVERY_STOPPED);
			}
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	cancel_delayed_work_sync(&hdev->le_scan_restart);

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}

static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
					  u16 opcode)
{
	unsigned long timeout, duration, scan_start, now;

	BT_DBG("%s", hdev->name);

	if (status) {
		BT_ERR("Failed to restart LE scan: status %d", status);
		return;
	}

	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
	    !hdev->discovery.scan_start)
		return;

	/* When the scan was started, hdev->le_scan_disable was queued to
	 * run 'duration' after scan_start. During the scan restart this
	 * job has been canceled, so it needs to be queued again with the
	 * remaining timeout to make sure the scan does not run
	 * indefinitely.
	 */
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
	if (now - scan_start <= duration) {
		int elapsed;

		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		timeout = 0;
	}
	queue_delayed_work(hdev->workqueue,
			   &hdev->le_scan_disable, timeout);
}
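
/* Worked example (not part of the original file, numbers made up): a scan
 * started at scan_start = 1000 jiffies with duration = 3000 jiffies that
 * is restarted at now = 1800 gives elapsed = 800, so le_scan_disable is
 * requeued with timeout = 3000 - 800 = 2200 jiffies and the total scan
 * time stays at the originally requested duration. The 'else' branch
 * above handles the case where the jiffies counter wrapped between
 * scan_start and now.
 */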

static void le_scan_restart_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	struct hci_request req;
	struct hci_cp_le_set_scan_enable cp;
	int err;

	BT_DBG("%s", hdev->name);

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_ENABLE;
	cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_restart_work_complete);
	if (err)
		BT_ERR("Restart LE scan request failed: err %d", err);
}

/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 *
 * In case BR/EDR has been disabled on a dual-mode controller and
 * userspace has configured a static address, then that address
 * becomes the identity address instead of the public BR/EDR address.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}
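
/* Illustrative sketch (not part of the original file): a typical caller
 * fills in its own address, e.g. when building advertising parameters:
 */
#if 0 /* example only */
	bdaddr_t own_addr;
	u8 own_addr_type;

	hci_copy_identity_address(hdev, &own_addr, &own_addr_type);
	/* own_addr_type is now ADDR_LE_DEV_PUBLIC or ADDR_LE_DEV_RANDOM */
#endif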

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_init_sysfs(hdev);
	discovery_init(hdev);
	adv_info_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		hci_dev_set_flag(hdev, HCI_RFKILLED);

	hci_dev_set_flag(hdev, HCI_SETUP);
	hci_dev_set_flag(hdev, HCI_AUTO_OFF);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
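
/* Illustrative sketch (not part of the original file): a minimal transport
 * driver registers a controller roughly like this; my_open, my_close and
 * my_send are hypothetical driver callbacks:
 */
#if 0 /* example only */
static int my_driver_probe(void)
{
	struct hci_dev *hdev;
	int id;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus   = HCI_USB;		/* whichever transport applies */
	hdev->open  = my_open;
	hdev->close = my_close;
	hdev->send  = my_send;		/* all three are mandatory */

	id = hci_register_dev(hdev);
	if (id < 0) {
		hci_free_dev(hdev);
		return id;
	}

	return 0;
}
#endif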

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hci_dev_set_flag(hdev, HCI_UNREGISTER);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Reset HCI device */
int hci_reset_dev(struct hci_dev *hdev)
{
	const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
	struct sk_buff *skb;

	skb = bt_skb_alloc(3, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	memcpy(skb_put(skb, 3), hw_err, 3);

	/* Send Hardware Error to upper stack */
	return hci_recv_frame(hdev, skb);
}
EXPORT_SYMBOL(hci_reset_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
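
/* Illustrative sketch (not part of the original file): a UART-style driver
 * that receives an unframed byte stream can hand each chunk straight to
 * hci_recv_stream_fragment(); the H4 packet-type byte at the start of each
 * frame selects the reassembly rules. The buffer below is a made-up
 * HCI_EVENT_PKT carrying a Command Complete event for HCI Reset:
 */
#if 0 /* example only */
	static u8 stream[] = {
		0x04,			/* HCI_EVENT_PKT indicator */
		0x0e, 0x04,		/* Command Complete, plen 4 */
		0x01, 0x03, 0x0c,	/* ncmd 1, opcode 0x0c03 (Reset) */
		0x00			/* status: success */
	};

	hci_recv_stream_fragment(hdev, stream, sizeof(stream));
#endif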

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_add_tail(&cb->list, &hci_cb_list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	err = hdev->send(hdev, skb);
	if (err < 0) {
		BT_ERR("%s sending frame failed (%d)", hdev->name, err);
		kfree_skb(skb);
	}
}

bool hci_req_pending(struct hci_dev *hdev)
{
	return (hdev->req_status == HCI_REQ_PEND);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
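
/* Illustrative sketch (not part of the original file): queueing a Write
 * Local Name command through hci_send_cmd(); the completion arrives
 * asynchronously via the event path rather than as a return value:
 */
#if 0 /* example only */
	struct hci_cp_write_local_name cp;

	memset(&cp, 0, sizeof(cp));
	memcpy(cp.name, "example", 7);
	hci_send_cmd(hdev, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
#endif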
1da177e4
LT
3595
3596/* Get data from the previously sent command */
a9de9248 3597void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
3598{
3599 struct hci_command_hdr *hdr;
3600
3601 if (!hdev->sent_cmd)
3602 return NULL;
3603
3604 hdr = (void *) hdev->sent_cmd->data;
3605
a9de9248 3606 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
3607 return NULL;
3608
f0e09510 3609 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
3610
3611 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3612}
3613
3614/* Send ACL data */
3615static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3616{
3617 struct hci_acl_hdr *hdr;
3618 int len = skb->len;
3619
badff6d0
ACM
3620 skb_push(skb, HCI_ACL_HDR_SIZE);
3621 skb_reset_transport_header(skb);
9c70220b 3622 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
3623 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3624 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
3625}

static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
                          struct sk_buff *skb, __u16 flags)
{
        struct hci_conn *conn = chan->conn;
        struct hci_dev *hdev = conn->hdev;
        struct sk_buff *list;

        skb->len = skb_headlen(skb);
        skb->data_len = 0;

        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

        switch (hdev->dev_type) {
        case HCI_BREDR:
                hci_add_acl_hdr(skb, conn->handle, flags);
                break;
        case HCI_AMP:
                hci_add_acl_hdr(skb, chan->handle, flags);
                break;
        default:
                BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
                return;
        }

        list = skb_shinfo(skb)->frag_list;
        if (!list) {
                /* Non fragmented */
                BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

                skb_queue_tail(queue, skb);
        } else {
                /* Fragmented */
                BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                skb_shinfo(skb)->frag_list = NULL;

                /* Queue all fragments atomically. We need to use spin_lock_bh
                 * here because of 6LoWPAN links, as there this function is
                 * called from softirq and using normal spin lock could cause
                 * deadlocks.
                 */
                spin_lock_bh(&queue->lock);

                __skb_queue_tail(queue, skb);

                flags &= ~ACL_START;
                flags |= ACL_CONT;
                do {
                        skb = list;
                        list = list->next;

                        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
                        hci_add_acl_hdr(skb, conn->handle, flags);

                        BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                        __skb_queue_tail(queue, skb);
                } while (list);

                spin_unlock_bh(&queue->lock);
        }
}
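
/*
 * Note on the fragment loop above: only the head skb keeps the
 * caller-supplied start flag; every entry on its frag_list is re-headed
 * with ACL_CONT so the controller can reassemble the L2CAP PDU. All
 * fragments are queued under a single lock acquisition so that no other
 * sender can interleave packets into the middle of the sequence.
 */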

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
        struct hci_dev *hdev = chan->conn->hdev;

        BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

        hci_queue_acl(chan, &chan->data_q, skb, flags);

        queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_sco_hdr hdr;

        BT_DBG("%s len %d", hdev->name, skb->len);

        hdr.handle = cpu_to_le16(conn->handle);
        hdr.dlen = skb->len;

        skb_push(skb, HCI_SCO_HDR_SIZE);
        skb_reset_transport_header(skb);
        memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

        bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

        skb_queue_tail(&conn->data_q, skb);
        queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
                                     int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *conn = NULL, *c;
        unsigned int num = 0, min = ~0;

        /* We don't have to lock device here. Connections are always
         * added and removed with TX task disabled.
         */
        rcu_read_lock();

        list_for_each_entry_rcu(c, &h->list, list) {
                if (c->type != type || skb_queue_empty(&c->data_q))
                        continue;

                if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
                        continue;

                num++;

                if (c->sent < min) {
                        min = c->sent;
                        conn = c;
                }

                if (hci_conn_num(hdev, type) == num)
                        break;
        }

        rcu_read_unlock();

        if (conn) {
                int cnt, q;

                switch (conn->type) {
                case ACL_LINK:
                        cnt = hdev->acl_cnt;
                        break;
                case SCO_LINK:
                case ESCO_LINK:
                        cnt = hdev->sco_cnt;
                        break;
                case LE_LINK:
                        cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
                        break;
                default:
                        cnt = 0;
                        BT_ERR("Unknown link type");
                }

                q = cnt / num;
                *quote = q ? q : 1;
        } else
                *quote = 0;

        BT_DBG("conn %p quote %d", conn, *quote);
        return conn;
}
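
/*
 * Worked example for the quota computed above (numbers are
 * illustrative): with hdev->acl_cnt == 8 free controller buffers and
 * num == 3 ACL connections holding queued data, the least-busy
 * connection is picked and given quote = 8 / 3 = 2 packets for this
 * scheduling round; a zero quotient is rounded up to 1 so a ready
 * connection always makes some progress.
 */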

static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *c;

        BT_ERR("%s link tx timeout", hdev->name);

        rcu_read_lock();

        /* Kill stalled connections */
        list_for_each_entry_rcu(c, &h->list, list) {
                if (c->type == type && c->sent) {
                        BT_ERR("%s killing stalled connection %pMR",
                               hdev->name, &c->dst);
                        hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
                }
        }

        rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
                                      int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_chan *chan = NULL;
        unsigned int num = 0, min = ~0, cur_prio = 0;
        struct hci_conn *conn;
        int cnt, q, conn_num = 0;

        BT_DBG("%s", hdev->name);

        rcu_read_lock();

        list_for_each_entry_rcu(conn, &h->list, list) {
                struct hci_chan *tmp;

                if (conn->type != type)
                        continue;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        continue;

                conn_num++;

                list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
                        struct sk_buff *skb;

                        if (skb_queue_empty(&tmp->data_q))
                                continue;

                        skb = skb_peek(&tmp->data_q);
                        if (skb->priority < cur_prio)
                                continue;

                        if (skb->priority > cur_prio) {
                                num = 0;
                                min = ~0;
                                cur_prio = skb->priority;
                        }

                        num++;

                        if (conn->sent < min) {
                                min = conn->sent;
                                chan = tmp;
                        }
                }

                if (hci_conn_num(hdev, type) == conn_num)
                        break;
        }

        rcu_read_unlock();

        if (!chan)
                return NULL;

        switch (chan->conn->type) {
        case ACL_LINK:
                cnt = hdev->acl_cnt;
                break;
        case AMP_LINK:
                cnt = hdev->block_cnt;
                break;
        case SCO_LINK:
        case ESCO_LINK:
                cnt = hdev->sco_cnt;
                break;
        case LE_LINK:
                cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
                break;
        default:
                cnt = 0;
                BT_ERR("Unknown link type");
        }

        q = cnt / num;
        *quote = q ? q : 1;
        BT_DBG("chan %p quote %d", chan, *quote);
        return chan;
}
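
/*
 * hci_chan_sent() extends the connection scheduler to HCI channels:
 * only channels whose head skb is at the current highest priority
 * compete, and among those the channel on the least-busy connection
 * wins. The quota is then computed from the free buffer count of the
 * matching link type, in the same way as in hci_low_sent() above.
 */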

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *conn;
        int num = 0;

        BT_DBG("%s", hdev->name);

        rcu_read_lock();

        list_for_each_entry_rcu(conn, &h->list, list) {
                struct hci_chan *chan;

                if (conn->type != type)
                        continue;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        continue;

                num++;

                list_for_each_entry_rcu(chan, &conn->chan_list, list) {
                        struct sk_buff *skb;

                        if (chan->sent) {
                                chan->sent = 0;
                                continue;
                        }

                        if (skb_queue_empty(&chan->data_q))
                                continue;

                        skb = skb_peek(&chan->data_q);
                        if (skb->priority >= HCI_PRIO_MAX - 1)
                                continue;

                        skb->priority = HCI_PRIO_MAX - 1;

                        BT_DBG("chan %p skb %p promoted to %d", chan, skb,
                               skb->priority);
                }

                if (hci_conn_num(hdev, type) == num)
                        break;
        }

        rcu_read_unlock();
}
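
/*
 * hci_prio_recalculate() is the anti-starvation half of the scheme:
 * after a TX round, channels that sent nothing (chan->sent == 0) get
 * the head of their queue promoted to HCI_PRIO_MAX - 1, so low-priority
 * traffic cannot be parked forever behind a busy high-priority channel.
 */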

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
        /* Calculate count of blocks used by this packet */
        return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
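
/*
 * Example (illustrative numbers): with hdev->block_len == 64 and the
 * 4-byte ACL header (HCI_ACL_HDR_SIZE), a 260-byte skb occupies
 * DIV_ROUND_UP(260 - 4, 64) == 4 data blocks.
 */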

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
        if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                /* ACL tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!cnt && time_after(jiffies, hdev->acl_last_tx +
                                       HCI_ACL_TX_TIMEOUT))
                        hci_link_tx_to(hdev, ACL_LINK);
        }
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
        unsigned int cnt = hdev->acl_cnt;
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote;

        __check_timeout(hdev, cnt);

        while (hdev->acl_cnt &&
               (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote-- && (skb = skb_peek(&chan->data_q))) {
                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        hci_conn_enter_active_mode(chan->conn,
                                                   bt_cb(skb)->force_active);

                        hci_send_frame(hdev, skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->acl_cnt--;
                        chan->sent++;
                        chan->conn->sent++;
                }
        }

        if (cnt != hdev->acl_cnt)
                hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
        unsigned int cnt = hdev->block_cnt;
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote;
        u8 type;

        __check_timeout(hdev, cnt);

        BT_DBG("%s", hdev->name);

        if (hdev->dev_type == HCI_AMP)
                type = AMP_LINK;
        else
                type = ACL_LINK;

        while (hdev->block_cnt > 0 &&
               (chan = hci_chan_sent(hdev, type, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
                        int blocks;

                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        blocks = __get_blocks(hdev, skb);
                        if (blocks > hdev->block_cnt)
                                return;

                        hci_conn_enter_active_mode(chan->conn,
                                                   bt_cb(skb)->force_active);

                        hci_send_frame(hdev, skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->block_cnt -= blocks;
                        quote -= blocks;

                        chan->sent += blocks;
                        chan->conn->sent += blocks;
                }
        }

        if (cnt != hdev->block_cnt)
                hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
        BT_DBG("%s", hdev->name);

        /* No ACL link over BR/EDR controller */
        if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
                return;

        /* No AMP link over AMP controller */
        if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
                return;

        switch (hdev->flow_ctl_mode) {
        case HCI_FLOW_CTL_MODE_PACKET_BASED:
                hci_sched_acl_pkt(hdev);
                break;

        case HCI_FLOW_CTL_MODE_BLOCK_BASED:
                hci_sched_acl_blk(hdev);
                break;
        }
}
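
/*
 * The two branches above match the controller's flow control model:
 * packet-based controllers report free buffer slots per packet, while
 * block-based (typically AMP) controllers report a shared pool of
 * fixed-size data blocks, which is why hci_sched_acl_blk() charges
 * __get_blocks(hdev, skb) per packet instead of a flat count of one.
 */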

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, SCO_LINK))
                return;

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(hdev, skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static void hci_sched_esco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, ESCO_LINK))
                return;

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
                                                     &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(hdev, skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static void hci_sched_le(struct hci_dev *hdev)
{
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote, cnt, tmp;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, LE_LINK))
                return;

        if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                /* LE tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!hdev->le_cnt && hdev->le_pkts &&
                    time_after(jiffies, hdev->le_last_tx + HZ * 45))
                        hci_link_tx_to(hdev, LE_LINK);
        }

        cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
        tmp = cnt;
        while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote-- && (skb = skb_peek(&chan->data_q))) {
                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        hci_send_frame(hdev, skb);
                        hdev->le_last_tx = jiffies;

                        cnt--;
                        chan->sent++;
                        chan->conn->sent++;
                }
        }

        if (hdev->le_pkts)
                hdev->le_cnt = cnt;
        else
                hdev->acl_cnt = cnt;

        if (cnt != tmp)
                hci_prio_recalculate(hdev, LE_LINK);
}
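
/*
 * Note: hdev->le_pkts is zero on controllers that do not expose a
 * dedicated LE buffer pool; in that case LE traffic is accounted
 * against the shared ACL credits, which is why the final bookkeeping
 * above writes cnt back to either le_cnt or acl_cnt.
 */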

static void hci_tx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
        struct sk_buff *skb;

        BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
               hdev->sco_cnt, hdev->le_cnt);

        if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                /* Schedule queues and send stuff to HCI driver */
                hci_sched_acl(hdev);
                hci_sched_sco(hdev);
                hci_sched_esco(hdev);
                hci_sched_le(hdev);
        }

        /* Send next queued raw (unknown type) packet */
        while ((skb = skb_dequeue(&hdev->raw_q)))
                hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_acl_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle, flags;

        skb_pull(skb, HCI_ACL_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);
        flags = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
               handle, flags);

        hdev->stat.acl_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

                /* Send to upper protocol */
                l2cap_recv_acldata(conn, skb, flags);
                return;
        } else {
                BT_ERR("%s ACL packet for unknown connection handle %d",
                       hdev->name, handle);
        }

        kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_sco_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle;

        skb_pull(skb, HCI_SCO_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);

        BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

        hdev->stat.sco_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                /* Send to upper protocol */
                sco_recv_scodata(conn, skb);
                return;
        } else {
                BT_ERR("%s SCO packet for unknown connection handle %d",
                       hdev->name, handle);
        }

        kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
        struct sk_buff *skb;

        skb = skb_peek(&hdev->cmd_q);
        if (!skb)
                return true;

        return bt_cb(skb)->req.start;
}

static void hci_resend_last(struct hci_dev *hdev)
{
        struct hci_command_hdr *sent;
        struct sk_buff *skb;
        u16 opcode;

        if (!hdev->sent_cmd)
                return;

        sent = (void *) hdev->sent_cmd->data;
        opcode = __le16_to_cpu(sent->opcode);
        if (opcode == HCI_OP_RESET)
                return;

        skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
        if (!skb)
                return;

        skb_queue_head(&hdev->cmd_q, skb);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
                          hci_req_complete_t *req_complete,
                          hci_req_complete_skb_t *req_complete_skb)
{
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

        /* If the completed command doesn't match the last one that was
         * sent we need to do special handling of it.
         */
        if (!hci_sent_cmd_data(hdev, opcode)) {
                /* Some CSR based controllers generate a spontaneous
                 * reset complete event during init and any pending
                 * command will never be completed. In such a case we
                 * need to resend whatever was the last sent
                 * command.
                 */
                if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
                        hci_resend_last(hdev);

                return;
        }

        /* If the command succeeded and there's still more commands in
         * this request the request is not yet complete.
         */
        if (!status && !hci_req_is_complete(hdev))
                return;

        /* If this was the last command in a request the complete
         * callback would be found in hdev->sent_cmd instead of the
         * command queue (hdev->cmd_q).
         */
        if (bt_cb(hdev->sent_cmd)->req.complete) {
                *req_complete = bt_cb(hdev->sent_cmd)->req.complete;
                return;
        }

        if (bt_cb(hdev->sent_cmd)->req.complete_skb) {
                *req_complete_skb = bt_cb(hdev->sent_cmd)->req.complete_skb;
                return;
        }

        /* Remove all pending commands belonging to this request */
        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        while ((skb = __skb_dequeue(&hdev->cmd_q))) {
                if (bt_cb(skb)->req.start) {
                        __skb_queue_head(&hdev->cmd_q, skb);
                        break;
                }

                *req_complete = bt_cb(skb)->req.complete;
                *req_complete_skb = bt_cb(skb)->req.complete_skb;
                kfree_skb(skb);
        }
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}
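
/*
 * A request can complete through one of two callback flavors stored in
 * the skb control block: req.complete receives just the status, while
 * req.complete_skb also receives the full event skb, for callers that
 * need more than the status byte. The lookup above checks sent_cmd
 * first and only then walks cmd_q, mirroring where the last command of
 * a request lives at completion time.
 */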

static void hci_rx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                /* Send copy to monitor */
                hci_send_to_monitor(hdev, skb);

                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in these states. */
                        switch (bt_cb(skb)->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (bt_cb(skb)->pkt_type) {
                case HCI_EVENT_PKT:
                        BT_DBG("%s Event packet", hdev->name);
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }
}

static void hci_cmd_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
        struct sk_buff *skb;

        BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
               atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt)) {
                skb = skb_dequeue(&hdev->cmd_q);
                if (!skb)
                        return;

                kfree_skb(hdev->sent_cmd);

                hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
                if (hdev->sent_cmd) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(hdev, skb);
                        if (test_bit(HCI_RESET, &hdev->flags))
                                cancel_delayed_work(&hdev->cmd_timer);
                        else
                                schedule_delayed_work(&hdev->cmd_timer,
                                                      HCI_CMD_TIMEOUT);
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
                }
        }
}
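
/*
 * Command flow control in hci_cmd_work() above: hdev->cmd_cnt is the
 * controller's advertised command credit, decremented per send and
 * refilled when the corresponding Command Complete/Status event comes
 * back. The clone kept in hdev->sent_cmd is what hci_sent_cmd_data()
 * and hci_resend_last() rely on, and cmd_timer acts as a watchdog
 * should the controller never answer (HCI_CMD_TIMEOUT).
 */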