/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

8c520a59 28#include <linux/export.h>
3df92b31 29#include <linux/idr.h>
1da177e4 30
8c520a59 31#include <linux/rfkill.h>
1da177e4
LT
32
33#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h>
35
/* Work handlers for RX, command and TX processing; definitions are not
 * in view here — presumably later in this file (forward declarations).
 */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list, protected by hci_dev_list_lock */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list, protected by hci_cb_list_lock */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering: allocator for hdev->id values */
static DEFINE_IDA(hci_index_ida);
50
1da177e4
LT
51/* ---- HCI notifications ---- */
52
/* Forward a device event (e.g. register/unregister) to the HCI socket
 * layer so listening sockets can be notified.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
57
58/* ---- HCI requests ---- */
59
/* Completion callback for synchronous requests: record the result and
 * wake up the waiter sleeping on req_wait_q. Only acts while a request
 * is actually pending.
 */
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
70
/* Abort a pending synchronous request with the given (positive) error
 * code and wake up the waiter; the waiter negates req_result on the
 * HCI_REQ_CANCELED path.
 */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
81
77a63e0a
FW
82static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
83 u8 event)
75e84b7c
JH
84{
85 struct hci_ev_cmd_complete *ev;
86 struct hci_event_hdr *hdr;
87 struct sk_buff *skb;
88
89 hci_dev_lock(hdev);
90
91 skb = hdev->recv_evt;
92 hdev->recv_evt = NULL;
93
94 hci_dev_unlock(hdev);
95
96 if (!skb)
97 return ERR_PTR(-ENODATA);
98
99 if (skb->len < sizeof(*hdr)) {
100 BT_ERR("Too short HCI event");
101 goto failed;
102 }
103
104 hdr = (void *) skb->data;
105 skb_pull(skb, HCI_EVENT_HDR_SIZE);
106
7b1abbbe
JH
107 if (event) {
108 if (hdr->evt != event)
109 goto failed;
110 return skb;
111 }
112
75e84b7c
JH
113 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
114 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
115 goto failed;
116 }
117
118 if (skb->len < sizeof(*ev)) {
119 BT_ERR("Too short cmd_complete event");
120 goto failed;
121 }
122
123 ev = (void *) skb->data;
124 skb_pull(skb, sizeof(*ev));
125
126 if (opcode == __le16_to_cpu(ev->opcode))
127 return skb;
128
129 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
130 __le16_to_cpu(ev->opcode));
131
132failed:
133 kfree_skb(skb);
134 return ERR_PTR(-ENODATA);
135}
136
/* Send a single HCI command and sleep (interruptibly, up to @timeout
 * jiffies) until it completes with @event (or Command Complete when
 * @event is 0). Returns the matching event skb or an ERR_PTR.
 *
 * Must be called from process context; the caller is expected to hold
 * the request lock (serialized like the other *_sync helpers).
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	/* Wait for hci_req_sync_complete() to flip req_status */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* NOTE(review): this early return leaves req_status as
	 * HCI_REQ_PEND, unlike every other exit path which clears it
	 * below — verify a later completion cannot race with the next
	 * request.
	 */
	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
190
/* Convenience wrapper around __hci_cmd_sync_ev() that waits for the
 * default Command Complete event (event == 0).
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
197
/* Execute request and wait for completion. */
/* Build a request via @func, run it, and sleep (interruptibly, up to
 * @timeout jiffies) for the completion callback. Returns 0 on success
 * or a negative errno. Callers must hold the request lock.
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* NOTE(review): as in __hci_cmd_sync_ev(), this path returns with
	 * req_status still HCI_REQ_PEND — confirm that is intentional.
	 */
	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* req_result is a HCI status byte; map it to an errno */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result already holds a positive errno here */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
261
/* Locked wrapper for __hci_req_sync(): refuses requests while the
 * device is down and serializes all synchronous requests via the
 * request lock.
 */
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
279
/* Queue an HCI_Reset command; HCI_RESET is set so the event path knows
 * a reset is in flight. @opt is unused (logged only).
 */
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}
288
/* Stage-1 init for BR/EDR controllers: packet-based flow control and
 * the basic identity/feature reads.
 */
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
302
/* Stage-1 init for AMP controllers: block-based flow control plus the
 * AMP-specific info/size reads.
 */
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}
316
42c6b129 317static void hci_init1_req(struct hci_request *req, unsigned long opt)
e61ef499 318{
42c6b129 319 struct hci_dev *hdev = req->hdev;
e61ef499
AE
320
321 BT_DBG("%s %ld", hdev->name, opt);
322
11778716
AE
323 /* Reset */
324 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
42c6b129 325 hci_reset_req(req, 0);
11778716 326
e61ef499
AE
327 switch (hdev->dev_type) {
328 case HCI_BREDR:
42c6b129 329 bredr_init(req);
e61ef499
AE
330 break;
331
332 case HCI_AMP:
42c6b129 333 amp_init(req);
e61ef499
AE
334 break;
335
336 default:
337 BT_ERR("Unknown device type %d", hdev->dev_type);
338 break;
339 }
e61ef499
AE
340}
341
/* Stage-2 setup for BR/EDR capable controllers: read basic parameters,
 * clear event filters and set the connection accept timeout.
 */
static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 slots) */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Read page scan parameters (commands exist only from HCI 1.2 on) */
	if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
373
/* Stage-2 setup for LE capable controllers: read LE parameters and,
 * for single-mode LE controllers, mark LE as implicitly enabled.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
397
398static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
399{
400 if (lmp_ext_inq_capable(hdev))
401 return 0x02;
402
403 if (lmp_inq_rssi_capable(hdev))
404 return 0x01;
405
406 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
407 hdev->lmp_subver == 0x0757)
408 return 0x01;
409
410 if (hdev->manufacturer == 15) {
411 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
412 return 0x01;
413 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
414 return 0x01;
415 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
416 return 0x01;
417 }
418
419 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
420 hdev->lmp_subver == 0x1805)
421 return 0x01;
422
423 return 0x00;
424}
425
42c6b129 426static void hci_setup_inquiry_mode(struct hci_request *req)
2177bab5
JH
427{
428 u8 mode;
429
42c6b129 430 mode = hci_get_inquiry_mode(req->hdev);
2177bab5 431
42c6b129 432 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
2177bab5
JH
433}
434
/* Build and send the HCI (and, for LE controllers, the LE) event mask
 * based on the controller's capabilities.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		/* events[] is reused here as the LE event mask */
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
515
/* Second stage of controller init: per-transport setup, event masks and
 * feature-dependent configuration commands.
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			/* SSP disabled: clear any stale EIR data */
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
569
/* Advertise the default link policy: enable each policy mode the
 * controller's LMP features say it supports.
 */
static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}
588
/* Sync the controller's LE host support setting with the host-side
 * HCI_LE_ENABLED flag; the command is only sent when the value would
 * actually change.
 */
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
609
/* Build and send event mask page 2, enabling the Connectionless Slave
 * Broadcast events for whichever CSB roles the controller supports.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x01) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x02) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
637
/* Third stage of controller init: stored-link-key cleanup, link policy,
 * LE host support and extended feature pages.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		hci_set_le_support(req);
		hci_update_ad(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
678
/* Fourth stage of controller init: optional commands that depend on
 * results gathered during the earlier stages.
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (hdev->features[2][0] & 0x04)
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}
691
/* Run the four controller init stages in order, stopping at the first
 * failure. Returns 0 on success or a negative errno.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
}
717
/* Request builder: write the scan enable setting passed in @opt
 * (inquiry and/or page scan bits).
 */
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
727
/* Request builder: write the authentication enable setting from @opt. */
static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
737
/* Request builder: write the encryption mode setting from @opt. */
static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
747
/* Request builder: write the default link policy passed in @opt
 * (converted to little endian for the wire).
 */
static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
757
8e87d142 758/* Get HCI device by index.
1da177e4
LT
759 * Device is held on return. */
760struct hci_dev *hci_dev_get(int index)
761{
8035ded4 762 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
763
764 BT_DBG("%d", index);
765
766 if (index < 0)
767 return NULL;
768
769 read_lock(&hci_dev_list_lock);
8035ded4 770 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
771 if (d->id == index) {
772 hdev = hci_dev_hold(d);
773 break;
774 }
775 }
776 read_unlock(&hci_dev_list_lock);
777 return hdev;
778}
1da177e4
LT
779
780/* ---- Inquiry support ---- */
ff9ef578 781
30dc78e1
JH
782bool hci_discovery_active(struct hci_dev *hdev)
783{
784 struct discovery_state *discov = &hdev->discovery;
785
6fbe195d 786 switch (discov->state) {
343f935b 787 case DISCOVERY_FINDING:
6fbe195d 788 case DISCOVERY_RESOLVING:
30dc78e1
JH
789 return true;
790
6fbe195d
AG
791 default:
792 return false;
793 }
30dc78e1
JH
794}
795
/* Transition the discovery state machine, notifying the management
 * interface when discovery effectively starts or stops. A no-op if the
 * state is unchanged.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* STARTING -> STOPPED means discovery never began, so no
		 * "stopped" notification is sent in that case.
		 */
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
821
/* Free every entry in the inquiry cache and reset the unknown/resolve
 * sub-lists (their nodes live inside the freed entries).
 */
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}
835
a8c5fb1a
GP
836struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
837 bdaddr_t *bdaddr)
1da177e4 838{
30883512 839 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
840 struct inquiry_entry *e;
841
6ed93dc6 842 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 843
561aafbc
JH
844 list_for_each_entry(e, &cache->all, all) {
845 if (!bacmp(&e->data.bdaddr, bdaddr))
846 return e;
847 }
848
849 return NULL;
850}
851
852struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 853 bdaddr_t *bdaddr)
561aafbc 854{
30883512 855 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
856 struct inquiry_entry *e;
857
6ed93dc6 858 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
859
860 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 861 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
862 return e;
863 }
864
865 return NULL;
1da177e4
LT
866}
867
/* Find an entry on the resolve list: either the one matching @bdaddr,
 * or — when @bdaddr is BDADDR_ANY — the first entry whose name_state
 * equals @state.
 */
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
886
/* Re-insert @ie into the resolve list keeping it sorted by signal
 * strength (smaller |rssi| first, i.e. strongest signal first), while
 * never moving it ahead of entries whose resolution is already pending.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
905
/* Add or refresh an inquiry cache entry for @data.
 *
 * @name_known: whether the remote name is already known to the caller.
 * @ssp: out parameter (may be NULL); set to the entry's SSP mode.
 *
 * Returns true when the entry's name is (now) known, false when the
 * name is still unknown or the entry could not be allocated.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* Once an entry was seen with SSP, report SSP even if this
		 * particular response didn't indicate it.
		 */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* Keep the resolve list ordered when the RSSI changes */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just became known: drop the entry from its sub-list */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
963
964static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
965{
30883512 966 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
967 struct inquiry_info *info = (struct inquiry_info *) buf;
968 struct inquiry_entry *e;
969 int copied = 0;
970
561aafbc 971 list_for_each_entry(e, &cache->all, all) {
1da177e4 972 struct inquiry_data *data = &e->data;
b57c1a56
JH
973
974 if (copied >= num)
975 break;
976
1da177e4
LT
977 bacpy(&info->bdaddr, &data->bdaddr);
978 info->pscan_rep_mode = data->pscan_rep_mode;
979 info->pscan_period_mode = data->pscan_period_mode;
980 info->pscan_mode = data->pscan_mode;
981 memcpy(info->dev_class, data->dev_class, 3);
982 info->clock_offset = data->clock_offset;
b57c1a56 983
1da177e4 984 info++;
b57c1a56 985 copied++;
1da177e4
LT
986 }
987
988 BT_DBG("cache %p, copied %d", cache, copied);
989 return copied;
990}
991
/* Request builder: start an inquiry with the LAP/length/num_rsp taken
 * from the struct hci_inquiry_req passed via @opt. Skipped when an
 * inquiry is already running.
 */
static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
1009
/* wait_on_bit() action: sleep until woken, reporting whether a signal
 * interrupted the wait (non-zero aborts the wait).
 */
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}
1015
1da177e4
LT
1016int hci_inquiry(void __user *arg)
1017{
1018 __u8 __user *ptr = arg;
1019 struct hci_inquiry_req ir;
1020 struct hci_dev *hdev;
1021 int err = 0, do_inquiry = 0, max_rsp;
1022 long timeo;
1023 __u8 *buf;
1024
1025 if (copy_from_user(&ir, ptr, sizeof(ir)))
1026 return -EFAULT;
1027
5a08ecce
AE
1028 hdev = hci_dev_get(ir.dev_id);
1029 if (!hdev)
1da177e4
LT
1030 return -ENODEV;
1031
0736cfa8
MH
1032 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1033 err = -EBUSY;
1034 goto done;
1035 }
1036
09fd0de5 1037 hci_dev_lock(hdev);
8e87d142 1038 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 1039 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 1040 hci_inquiry_cache_flush(hdev);
1da177e4
LT
1041 do_inquiry = 1;
1042 }
09fd0de5 1043 hci_dev_unlock(hdev);
1da177e4 1044
04837f64 1045 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
1046
1047 if (do_inquiry) {
01178cd4
JH
1048 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1049 timeo);
70f23020
AE
1050 if (err < 0)
1051 goto done;
3e13fa1e
AG
1052
1053 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1054 * cleared). If it is interrupted by a signal, return -EINTR.
1055 */
1056 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1057 TASK_INTERRUPTIBLE))
1058 return -EINTR;
70f23020 1059 }
1da177e4 1060
8fc9ced3
GP
1061 /* for unlimited number of responses we will use buffer with
1062 * 255 entries
1063 */
1da177e4
LT
1064 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1065
1066 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1067 * copy it to the user space.
1068 */
01df8c31 1069 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 1070 if (!buf) {
1da177e4
LT
1071 err = -ENOMEM;
1072 goto done;
1073 }
1074
09fd0de5 1075 hci_dev_lock(hdev);
1da177e4 1076 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 1077 hci_dev_unlock(hdev);
1da177e4
LT
1078
1079 BT_DBG("num_rsp %d", ir.num_rsp);
1080
1081 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1082 ptr += sizeof(ir);
1083 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 1084 ir.num_rsp))
1da177e4 1085 err = -EFAULT;
8e87d142 1086 } else
1da177e4
LT
1087 err = -EFAULT;
1088
1089 kfree(buf);
1090
1091done:
1092 hci_dev_put(hdev);
1093 return err;
1094}
1095
/* Build LE advertising data into @ptr (at most HCI_MAX_AD_LENGTH bytes):
 * an optional Flags field, an optional TX-power field, and the local
 * name (shortened if it does not fit). Returns the number of bytes
 * written.
 */
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (!lmp_bredr_capable(hdev))
		flags |= LE_AD_NO_BREDR;

	if (lmp_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_CTRL;

	if (lmp_host_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_HOST;

	/* Only emit a Flags AD element if at least one flag is set */
	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;	/* element length (type + 1 data byte) */
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		/* Reserve 2 bytes for this element's length + type header */
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
1153
/* Regenerate the LE advertising data and, if it changed since the last
 * update, queue an LE Set Advertising Data command on @req. No-op for
 * controllers without LE support.
 */
void hci_update_ad(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!lmp_le_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	/* Skip the HCI command if the data is unchanged */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
1178
1da177e4
LT
1179/* ---- HCI ioctl helpers ---- */
1180
/* Bring up the HCI device with index @dev: open the transport, run the
 * driver setup and controller init sequence, and notify mgmt. On init
 * failure all queues/works are flushed and the transport is closed
 * again. Returns 0 or a negative errno.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Device is being unregistered; refuse to power it on */
	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	/* Check for rfkill but allow the HCI setup stage to proceed
	 * (which in itself doesn't cause any RF activity).
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	/* Driver-specific setup runs only once, during initial setup */
	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		/* Treat all non BR/EDR controllers as raw devices if
		 * enable_hs is not set.
		 */
		if (hdev->dev_type != HCI_BREDR && !enable_hs)
			set_bit(HCI_RAW, &hdev->flags);

		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		/* Raw and user-channel devices skip the controller init */
		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
1278
/* Power down @hdev: cancel pending work, flush queues, optionally send
 * an HCI Reset, close the transport, and clear the non-persistent
 * flags. Safe to call when the device is already down (returns 0).
 * The ordering of cancels/flushes here is deliberate — do not reorder.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* kfree_skb() tolerates NULL, so no check needed */
	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	/* Drop the reference taken by hci_dev_open() (hci_dev_hold) */
	hci_dev_put(hdev);
	return 0;
}
1373
/* Handle the HCIDEVDOWN ioctl: cancel a pending auto power-off and shut
 * the device down. Refused with -EBUSY while the device is bound to a
 * user channel.
 */
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
1397
/* Handle the HCIDEVRESET ioctl: drop all queued traffic, flush the
 * inquiry cache and connection hash, and (for non-raw devices) issue a
 * synchronous HCI Reset. The device must be up.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset flow-control counters to their idle state */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
1442
/* Handle the HCIDEVRESTAT ioctl: zero the device's statistics counters.
 * Refused with -EBUSY on user-channel devices.
 */
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}
1463
/* Handle the HCISET* family of ioctls: run the matching synchronous HCI
 * request (auth/encrypt/scan/link policy) or update the corresponding
 * hdev field directly (link mode, packet type, MTUs).
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs two __u16 values: high half = MTU,
		 * low half = packet count */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
1544
/* Handle the HCIGETDEVLIST ioctl: fill a user-supplied hci_dev_list_req
 * with up to dev_num (id, flags) pairs, one per registered device.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the allocation at two pages worth of entries */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Listing a device counts as user activity: keep an
		 * auto-off device powered */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Legacy (non-mgmt) userspace expects pairable devices */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1591
/* Handle the HCIGETDEVINFO ioctl: copy a snapshot of the device's
 * identity, capabilities and statistics into a user-supplied
 * hci_dev_info. LE-only controllers report their LE buffer settings in
 * the ACL fields.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Querying counts as user activity: keep an auto-off device on */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		/* LE-only: expose LE buffers via the ACL fields */
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1640
1641/* ---- Interface to HCI drivers ---- */
1642
/* rfkill set_block callback: track the blocked state in HCI_RFKILLED
 * and power the device down when it gets blocked (unless it is still in
 * the setup stage). User-channel devices cannot be blocked.
 */
static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}
1662
/* rfkill operations: only block/unblock is supported */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1666
/* power_on work handler: open the device; if it came up rfkill-blocked,
 * close it again, and if it was auto-powered schedule the auto power-off
 * timer. Announces the new controller to mgmt once setup completes.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_open(hdev->id);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
		/* rfkill blocked while we were opening: back down again */
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1691
/* power_off delayed-work handler: shut the device down */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
1701
/* discov_off delayed-work handler: the discoverable timeout expired, so
 * drop inquiry scan (keep page scan) and reset the timeout.
 */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1719
2aeb9a1a
JH
1720int hci_uuids_clear(struct hci_dev *hdev)
1721{
4821002c 1722 struct bt_uuid *uuid, *tmp;
2aeb9a1a 1723
4821002c
JH
1724 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1725 list_del(&uuid->list);
2aeb9a1a
JH
1726 kfree(uuid);
1727 }
1728
1729 return 0;
1730}
1731
55ed8ca1
JH
1732int hci_link_keys_clear(struct hci_dev *hdev)
1733{
1734 struct list_head *p, *n;
1735
1736 list_for_each_safe(p, n, &hdev->link_keys) {
1737 struct link_key *key;
1738
1739 key = list_entry(p, struct link_key, list);
1740
1741 list_del(p);
1742 kfree(key);
1743 }
1744
1745 return 0;
1746}
1747
/* Remove and free every stored SMP long term key on @hdev.
 * Always returns 0.
 */
int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}
1759
55ed8ca1
JH
1760struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1761{
8035ded4 1762 struct link_key *k;
55ed8ca1 1763
8035ded4 1764 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1765 if (bacmp(bdaddr, &k->bdaddr) == 0)
1766 return k;
55ed8ca1
JH
1767
1768 return NULL;
1769}
1770
/* Decide whether a link key should be stored persistently (true) or
 * only for the lifetime of the connection (false), based on the key
 * type and the bonding requirements of both sides.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement
	 * (auth values > 0x01 mean some form of bonding was requested) */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
1806
/* Look up a long term key by its EDIV/Rand pair, or NULL if no match. */
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}
75d262c2 1821
c9839a11 1822struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
04124681 1823 u8 addr_type)
75d262c2 1824{
c9839a11 1825 struct smp_ltk *k;
75d262c2 1826
c9839a11
VCG
1827 list_for_each_entry(k, &hdev->long_term_keys, list)
1828 if (addr_type == k->bdaddr_type &&
a8c5fb1a 1829 bacmp(bdaddr, &k->bdaddr) == 0)
75d262c2
VCG
1830 return k;
1831
1832 return NULL;
1833}
75d262c2 1834
/* Store (or update) the link key for @bdaddr. If @new_key is set, the
 * key is reported to mgmt and, for non-persistent keys, flagged to be
 * flushed when the connection drops. Returns 0 or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for the checks below */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed-combination key keeps the previous key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
1887
/* Store (or update) an SMP short/long term key for @bdaddr/@addr_type.
 * Only STK and LTK types are accepted; new LTKs are reported to mgmt.
 * Returns 0 or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	/* Silently ignore key types other than STK/LTK */
	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* Only LTKs (not STKs) are announced to mgmt */
	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1924
/* Delete the stored link key for @bdaddr. Returns 0 or -ENOENT if no
 * key was stored.
 */
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}
1940
/* Delete every stored long term key matching @bdaddr (there may be one
 * per address type). Always returns 0, even if nothing matched.
 */
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}
1957
6bd32326 1958/* HCI command timer function */
bda4f23a 1959static void hci_cmd_timeout(unsigned long arg)
6bd32326
VT
1960{
1961 struct hci_dev *hdev = (void *) arg;
1962
bda4f23a
AE
1963 if (hdev->sent_cmd) {
1964 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1965 u16 opcode = __le16_to_cpu(sent->opcode);
1966
1967 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1968 } else {
1969 BT_ERR("%s command tx timeout", hdev->name);
1970 }
1971
6bd32326 1972 atomic_set(&hdev->cmd_cnt, 1);
c347b765 1973 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
1974}
1975
2763eda6 1976struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 1977 bdaddr_t *bdaddr)
2763eda6
SJ
1978{
1979 struct oob_data *data;
1980
1981 list_for_each_entry(data, &hdev->remote_oob_data, list)
1982 if (bacmp(bdaddr, &data->bdaddr) == 0)
1983 return data;
1984
1985 return NULL;
1986}
1987
/* Delete the stored remote OOB data for @bdaddr. Returns 0 or -ENOENT
 * if no entry was stored.
 */
int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}
2003
/* Remove and free every stored remote OOB data entry on @hdev.
 * Always returns 0.
 */
int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}
2015
/* Store (or overwrite) the remote OOB hash/randomizer pair for @bdaddr.
 * Returns 0 or -ENOMEM.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
2039
04124681 2040struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
b2a66aad 2041{
8035ded4 2042 struct bdaddr_list *b;
b2a66aad 2043
8035ded4 2044 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
2045 if (bacmp(bdaddr, &b->bdaddr) == 0)
2046 return b;
b2a66aad
AJ
2047
2048 return NULL;
2049}
2050
2051int hci_blacklist_clear(struct hci_dev *hdev)
2052{
2053 struct list_head *p, *n;
2054
2055 list_for_each_safe(p, n, &hdev->blacklist) {
2056 struct bdaddr_list *b;
2057
2058 b = list_entry(p, struct bdaddr_list, list);
2059
2060 list_del(p);
2061 kfree(b);
2062 }
2063
2064 return 0;
2065}
2066
/* Add @bdaddr (of @type) to the device blacklist and notify mgmt.
 * Returns -EBADF for BDADDR_ANY, -EEXIST for duplicates, -ENOMEM on
 * allocation failure, otherwise the mgmt notification result.
 */
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	/* The wildcard address cannot be blacklisted */
	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}
2087
/* Remove @bdaddr from the blacklist and notify mgmt. Passing BDADDR_ANY
 * clears the whole list instead. Returns -ENOENT if the address was not
 * listed, otherwise the mgmt notification result.
 */
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
2104
/* Request-completion callback for the inquiry started in
 * le_scan_disable_work_complete(): on failure, move discovery back to
 * the stopped state. Success needs no action here.
 */
static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}
2116
/* Completion callback for the LE-scan-disable request: for LE-only
 * discovery we are done; for interleaved discovery, start the BR/EDR
 * inquiry phase with the general inquiry access code.
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		/* LE-only discovery: scanning stopped means we're done */
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		/* LE phase done; kick off the BR/EDR inquiry phase */
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}
2159
/* le_scan_disable delayed-work handler: the LE scan window elapsed, so
 * submit an LE Set Scan Enable(disable) request; the completion callback
 * decides whether discovery stops or continues with inquiry.
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}
2180
/* Alloc HCI device: allocate and initialize a new hci_dev with default
 * packet types, empty key/blacklist/UUID lists, work items, queues and
 * the command timer. Returns NULL on allocation failure; the caller
 * releases it with hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Sniff intervals in slots (0.625 ms units) */
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
2234
2235/* Free HCI device */
2236void hci_free_dev(struct hci_dev *hdev)
2237{
9be0dab7
DH
2238 /* will free via device release */
2239 put_device(&hdev->dev);
2240}
2241EXPORT_SYMBOL(hci_free_dev);
2242
1da177e4
LT
2243/* Register HCI device */
2244int hci_register_dev(struct hci_dev *hdev)
2245{
b1b813d4 2246 int id, error;
1da177e4 2247
010666a1 2248 if (!hdev->open || !hdev->close)
1da177e4
LT
2249 return -EINVAL;
2250
08add513
MM
2251 /* Do not allow HCI_AMP devices to register at index 0,
2252 * so the index can be used as the AMP controller ID.
2253 */
3df92b31
SL
2254 switch (hdev->dev_type) {
2255 case HCI_BREDR:
2256 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2257 break;
2258 case HCI_AMP:
2259 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2260 break;
2261 default:
2262 return -EINVAL;
1da177e4 2263 }
8e87d142 2264
3df92b31
SL
2265 if (id < 0)
2266 return id;
2267
1da177e4
LT
2268 sprintf(hdev->name, "hci%d", id);
2269 hdev->id = id;
2d8b3a11
AE
2270
2271 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2272
d8537548
KC
2273 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2274 WQ_MEM_RECLAIM, 1, hdev->name);
33ca954d
DH
2275 if (!hdev->workqueue) {
2276 error = -ENOMEM;
2277 goto err;
2278 }
f48fd9c8 2279
d8537548
KC
2280 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2281 WQ_MEM_RECLAIM, 1, hdev->name);
6ead1bbc
JH
2282 if (!hdev->req_workqueue) {
2283 destroy_workqueue(hdev->workqueue);
2284 error = -ENOMEM;
2285 goto err;
2286 }
2287
33ca954d
DH
2288 error = hci_add_sysfs(hdev);
2289 if (error < 0)
2290 goto err_wqueue;
1da177e4 2291
611b30f7 2292 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
2293 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2294 hdev);
611b30f7
MH
2295 if (hdev->rfkill) {
2296 if (rfkill_register(hdev->rfkill) < 0) {
2297 rfkill_destroy(hdev->rfkill);
2298 hdev->rfkill = NULL;
2299 }
2300 }
2301
5e130367
JH
2302 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2303 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2304
a8b2d5c2 2305 set_bit(HCI_SETUP, &hdev->dev_flags);
ce2be9ac
AE
2306
2307 if (hdev->dev_type != HCI_AMP)
2308 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2309
fcee3377
GP
2310 write_lock(&hci_dev_list_lock);
2311 list_add(&hdev->list, &hci_dev_list);
2312 write_unlock(&hci_dev_list_lock);
2313
1da177e4 2314 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 2315 hci_dev_hold(hdev);
1da177e4 2316
19202573 2317 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 2318
1da177e4 2319 return id;
f48fd9c8 2320
33ca954d
DH
2321err_wqueue:
2322 destroy_workqueue(hdev->workqueue);
6ead1bbc 2323 destroy_workqueue(hdev->req_workqueue);
33ca954d 2324err:
3df92b31 2325 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 2326
33ca954d 2327 return error;
1da177e4
LT
2328}
2329EXPORT_SYMBOL(hci_register_dev);
2330
2331/* Unregister HCI device */
59735631 2332void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 2333{
3df92b31 2334 int i, id;
ef222013 2335
c13854ce 2336 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 2337
94324962
JH
2338 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2339
3df92b31
SL
2340 id = hdev->id;
2341
f20d09d5 2342 write_lock(&hci_dev_list_lock);
1da177e4 2343 list_del(&hdev->list);
f20d09d5 2344 write_unlock(&hci_dev_list_lock);
1da177e4
LT
2345
2346 hci_dev_do_close(hdev);
2347
cd4c5391 2348 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
2349 kfree_skb(hdev->reassembly[i]);
2350
b9b5ef18
GP
2351 cancel_work_sync(&hdev->power_on);
2352
ab81cbf9 2353 if (!test_bit(HCI_INIT, &hdev->flags) &&
a8c5fb1a 2354 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
09fd0de5 2355 hci_dev_lock(hdev);
744cf19e 2356 mgmt_index_removed(hdev);
09fd0de5 2357 hci_dev_unlock(hdev);
56e5cb86 2358 }
ab81cbf9 2359
2e58ef3e
JH
2360 /* mgmt_index_removed should take care of emptying the
2361 * pending list */
2362 BUG_ON(!list_empty(&hdev->mgmt_pending));
2363
1da177e4
LT
2364 hci_notify(hdev, HCI_DEV_UNREG);
2365
611b30f7
MH
2366 if (hdev->rfkill) {
2367 rfkill_unregister(hdev->rfkill);
2368 rfkill_destroy(hdev->rfkill);
2369 }
2370
ce242970 2371 hci_del_sysfs(hdev);
147e2d59 2372
f48fd9c8 2373 destroy_workqueue(hdev->workqueue);
6ead1bbc 2374 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 2375
09fd0de5 2376 hci_dev_lock(hdev);
e2e0cacb 2377 hci_blacklist_clear(hdev);
2aeb9a1a 2378 hci_uuids_clear(hdev);
55ed8ca1 2379 hci_link_keys_clear(hdev);
b899efaf 2380 hci_smp_ltks_clear(hdev);
2763eda6 2381 hci_remote_oob_data_clear(hdev);
09fd0de5 2382 hci_dev_unlock(hdev);
e2e0cacb 2383
dc946bd8 2384 hci_dev_put(hdev);
3df92b31
SL
2385
2386 ida_simple_remove(&hci_index_ida, id);
1da177e4
LT
2387}
2388EXPORT_SYMBOL(hci_unregister_dev);
2389
2390/* Suspend HCI device */
2391int hci_suspend_dev(struct hci_dev *hdev)
2392{
2393 hci_notify(hdev, HCI_DEV_SUSPEND);
2394 return 0;
2395}
2396EXPORT_SYMBOL(hci_suspend_dev);
2397
2398/* Resume HCI device */
2399int hci_resume_dev(struct hci_dev *hdev)
2400{
2401 hci_notify(hdev, HCI_DEV_RESUME);
2402 return 0;
2403}
2404EXPORT_SYMBOL(hci_resume_dev);
2405
76bca880
MH
2406/* Receive frame from HCI drivers */
2407int hci_recv_frame(struct sk_buff *skb)
2408{
2409 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2410 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 2411 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
2412 kfree_skb(skb);
2413 return -ENXIO;
2414 }
2415
d82603c6 2416 /* Incoming skb */
76bca880
MH
2417 bt_cb(skb)->incoming = 1;
2418
2419 /* Time stamp */
2420 __net_timestamp(skb);
2421
76bca880 2422 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 2423 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 2424
76bca880
MH
2425 return 0;
2426}
2427EXPORT_SYMBOL(hci_recv_frame);
2428
33e882a5 2429static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 2430 int count, __u8 index)
33e882a5
SS
2431{
2432 int len = 0;
2433 int hlen = 0;
2434 int remain = count;
2435 struct sk_buff *skb;
2436 struct bt_skb_cb *scb;
2437
2438 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 2439 index >= NUM_REASSEMBLY)
33e882a5
SS
2440 return -EILSEQ;
2441
2442 skb = hdev->reassembly[index];
2443
2444 if (!skb) {
2445 switch (type) {
2446 case HCI_ACLDATA_PKT:
2447 len = HCI_MAX_FRAME_SIZE;
2448 hlen = HCI_ACL_HDR_SIZE;
2449 break;
2450 case HCI_EVENT_PKT:
2451 len = HCI_MAX_EVENT_SIZE;
2452 hlen = HCI_EVENT_HDR_SIZE;
2453 break;
2454 case HCI_SCODATA_PKT:
2455 len = HCI_MAX_SCO_SIZE;
2456 hlen = HCI_SCO_HDR_SIZE;
2457 break;
2458 }
2459
1e429f38 2460 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
2461 if (!skb)
2462 return -ENOMEM;
2463
2464 scb = (void *) skb->cb;
2465 scb->expect = hlen;
2466 scb->pkt_type = type;
2467
2468 skb->dev = (void *) hdev;
2469 hdev->reassembly[index] = skb;
2470 }
2471
2472 while (count) {
2473 scb = (void *) skb->cb;
89bb46d0 2474 len = min_t(uint, scb->expect, count);
33e882a5
SS
2475
2476 memcpy(skb_put(skb, len), data, len);
2477
2478 count -= len;
2479 data += len;
2480 scb->expect -= len;
2481 remain = count;
2482
2483 switch (type) {
2484 case HCI_EVENT_PKT:
2485 if (skb->len == HCI_EVENT_HDR_SIZE) {
2486 struct hci_event_hdr *h = hci_event_hdr(skb);
2487 scb->expect = h->plen;
2488
2489 if (skb_tailroom(skb) < scb->expect) {
2490 kfree_skb(skb);
2491 hdev->reassembly[index] = NULL;
2492 return -ENOMEM;
2493 }
2494 }
2495 break;
2496
2497 case HCI_ACLDATA_PKT:
2498 if (skb->len == HCI_ACL_HDR_SIZE) {
2499 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2500 scb->expect = __le16_to_cpu(h->dlen);
2501
2502 if (skb_tailroom(skb) < scb->expect) {
2503 kfree_skb(skb);
2504 hdev->reassembly[index] = NULL;
2505 return -ENOMEM;
2506 }
2507 }
2508 break;
2509
2510 case HCI_SCODATA_PKT:
2511 if (skb->len == HCI_SCO_HDR_SIZE) {
2512 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2513 scb->expect = h->dlen;
2514
2515 if (skb_tailroom(skb) < scb->expect) {
2516 kfree_skb(skb);
2517 hdev->reassembly[index] = NULL;
2518 return -ENOMEM;
2519 }
2520 }
2521 break;
2522 }
2523
2524 if (scb->expect == 0) {
2525 /* Complete frame */
2526
2527 bt_cb(skb)->pkt_type = type;
2528 hci_recv_frame(skb);
2529
2530 hdev->reassembly[index] = NULL;
2531 return remain;
2532 }
2533 }
2534
2535 return remain;
2536}
2537
ef222013
MH
2538int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2539{
f39a3c06
SS
2540 int rem = 0;
2541
ef222013
MH
2542 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2543 return -EILSEQ;
2544
da5f6c37 2545 while (count) {
1e429f38 2546 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
2547 if (rem < 0)
2548 return rem;
ef222013 2549
f39a3c06
SS
2550 data += (count - rem);
2551 count = rem;
f81c6224 2552 }
ef222013 2553
f39a3c06 2554 return rem;
ef222013
MH
2555}
2556EXPORT_SYMBOL(hci_recv_fragment);
2557
99811510
SS
2558#define STREAM_REASSEMBLY 0
2559
2560int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2561{
2562 int type;
2563 int rem = 0;
2564
da5f6c37 2565 while (count) {
99811510
SS
2566 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2567
2568 if (!skb) {
2569 struct { char type; } *pkt;
2570
2571 /* Start of the frame */
2572 pkt = data;
2573 type = pkt->type;
2574
2575 data++;
2576 count--;
2577 } else
2578 type = bt_cb(skb)->pkt_type;
2579
1e429f38 2580 rem = hci_reassembly(hdev, type, data, count,
a8c5fb1a 2581 STREAM_REASSEMBLY);
99811510
SS
2582 if (rem < 0)
2583 return rem;
2584
2585 data += (count - rem);
2586 count = rem;
f81c6224 2587 }
99811510
SS
2588
2589 return rem;
2590}
2591EXPORT_SYMBOL(hci_recv_stream_fragment);
2592
1da177e4
LT
2593/* ---- Interface to upper protocols ---- */
2594
1da177e4
LT
2595int hci_register_cb(struct hci_cb *cb)
2596{
2597 BT_DBG("%p name %s", cb, cb->name);
2598
f20d09d5 2599 write_lock(&hci_cb_list_lock);
1da177e4 2600 list_add(&cb->list, &hci_cb_list);
f20d09d5 2601 write_unlock(&hci_cb_list_lock);
1da177e4
LT
2602
2603 return 0;
2604}
2605EXPORT_SYMBOL(hci_register_cb);
2606
2607int hci_unregister_cb(struct hci_cb *cb)
2608{
2609 BT_DBG("%p name %s", cb, cb->name);
2610
f20d09d5 2611 write_lock(&hci_cb_list_lock);
1da177e4 2612 list_del(&cb->list);
f20d09d5 2613 write_unlock(&hci_cb_list_lock);
1da177e4
LT
2614
2615 return 0;
2616}
2617EXPORT_SYMBOL(hci_unregister_cb);
2618
2619static int hci_send_frame(struct sk_buff *skb)
2620{
2621 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2622
2623 if (!hdev) {
2624 kfree_skb(skb);
2625 return -ENODEV;
2626 }
2627
0d48d939 2628 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 2629
cd82e61c
MH
2630 /* Time stamp */
2631 __net_timestamp(skb);
1da177e4 2632
cd82e61c
MH
2633 /* Send copy to monitor */
2634 hci_send_to_monitor(hdev, skb);
2635
2636 if (atomic_read(&hdev->promisc)) {
2637 /* Send copy to the sockets */
470fe1b5 2638 hci_send_to_sock(hdev, skb);
1da177e4
LT
2639 }
2640
2641 /* Get rid of skb owner, prior to sending to the driver. */
2642 skb_orphan(skb);
2643
2644 return hdev->send(skb);
2645}
2646
3119ae95
JH
2647void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2648{
2649 skb_queue_head_init(&req->cmd_q);
2650 req->hdev = hdev;
5d73e034 2651 req->err = 0;
3119ae95
JH
2652}
2653
2654int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2655{
2656 struct hci_dev *hdev = req->hdev;
2657 struct sk_buff *skb;
2658 unsigned long flags;
2659
2660 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2661
5d73e034
AG
2662 /* If an error occured during request building, remove all HCI
2663 * commands queued on the HCI request queue.
2664 */
2665 if (req->err) {
2666 skb_queue_purge(&req->cmd_q);
2667 return req->err;
2668 }
2669
3119ae95
JH
2670 /* Do not allow empty requests */
2671 if (skb_queue_empty(&req->cmd_q))
382b0c39 2672 return -ENODATA;
3119ae95
JH
2673
2674 skb = skb_peek_tail(&req->cmd_q);
2675 bt_cb(skb)->req.complete = complete;
2676
2677 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2678 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2679 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2680
2681 queue_work(hdev->workqueue, &hdev->cmd_work);
2682
2683 return 0;
2684}
2685
1ca3a9d0 2686static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
07dc93dd 2687 u32 plen, const void *param)
1da177e4
LT
2688{
2689 int len = HCI_COMMAND_HDR_SIZE + plen;
2690 struct hci_command_hdr *hdr;
2691 struct sk_buff *skb;
2692
1da177e4 2693 skb = bt_skb_alloc(len, GFP_ATOMIC);
1ca3a9d0
JH
2694 if (!skb)
2695 return NULL;
1da177e4
LT
2696
2697 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 2698 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
2699 hdr->plen = plen;
2700
2701 if (plen)
2702 memcpy(skb_put(skb, plen), param, plen);
2703
2704 BT_DBG("skb len %d", skb->len);
2705
0d48d939 2706 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1da177e4 2707 skb->dev = (void *) hdev;
c78ae283 2708
1ca3a9d0
JH
2709 return skb;
2710}
2711
2712/* Send HCI command */
07dc93dd
JH
2713int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2714 const void *param)
1ca3a9d0
JH
2715{
2716 struct sk_buff *skb;
2717
2718 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2719
2720 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2721 if (!skb) {
2722 BT_ERR("%s no memory for command", hdev->name);
2723 return -ENOMEM;
2724 }
2725
11714b3d
JH
2726 /* Stand-alone HCI commands must be flaged as
2727 * single-command requests.
2728 */
2729 bt_cb(skb)->req.start = true;
2730
1da177e4 2731 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 2732 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
2733
2734 return 0;
2735}
1da177e4 2736
71c76a17 2737/* Queue a command to an asynchronous HCI request */
07dc93dd
JH
2738void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2739 const void *param, u8 event)
71c76a17
JH
2740{
2741 struct hci_dev *hdev = req->hdev;
2742 struct sk_buff *skb;
2743
2744 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2745
34739c1e
AG
2746 /* If an error occured during request building, there is no point in
2747 * queueing the HCI command. We can simply return.
2748 */
2749 if (req->err)
2750 return;
2751
71c76a17
JH
2752 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2753 if (!skb) {
5d73e034
AG
2754 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2755 hdev->name, opcode);
2756 req->err = -ENOMEM;
e348fe6b 2757 return;
71c76a17
JH
2758 }
2759
2760 if (skb_queue_empty(&req->cmd_q))
2761 bt_cb(skb)->req.start = true;
2762
02350a72
JH
2763 bt_cb(skb)->req.event = event;
2764
71c76a17 2765 skb_queue_tail(&req->cmd_q, skb);
71c76a17
JH
2766}
2767
07dc93dd
JH
2768void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2769 const void *param)
02350a72
JH
2770{
2771 hci_req_add_ev(req, opcode, plen, param, 0);
2772}
2773
1da177e4 2774/* Get data from the previously sent command */
a9de9248 2775void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
2776{
2777 struct hci_command_hdr *hdr;
2778
2779 if (!hdev->sent_cmd)
2780 return NULL;
2781
2782 hdr = (void *) hdev->sent_cmd->data;
2783
a9de9248 2784 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
2785 return NULL;
2786
f0e09510 2787 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
2788
2789 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2790}
2791
2792/* Send ACL data */
2793static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2794{
2795 struct hci_acl_hdr *hdr;
2796 int len = skb->len;
2797
badff6d0
ACM
2798 skb_push(skb, HCI_ACL_HDR_SIZE);
2799 skb_reset_transport_header(skb);
9c70220b 2800 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
2801 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2802 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
2803}
2804
ee22be7e 2805static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 2806 struct sk_buff *skb, __u16 flags)
1da177e4 2807{
ee22be7e 2808 struct hci_conn *conn = chan->conn;
1da177e4
LT
2809 struct hci_dev *hdev = conn->hdev;
2810 struct sk_buff *list;
2811
087bfd99
GP
2812 skb->len = skb_headlen(skb);
2813 skb->data_len = 0;
2814
2815 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
2816
2817 switch (hdev->dev_type) {
2818 case HCI_BREDR:
2819 hci_add_acl_hdr(skb, conn->handle, flags);
2820 break;
2821 case HCI_AMP:
2822 hci_add_acl_hdr(skb, chan->handle, flags);
2823 break;
2824 default:
2825 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2826 return;
2827 }
087bfd99 2828
70f23020
AE
2829 list = skb_shinfo(skb)->frag_list;
2830 if (!list) {
1da177e4
LT
2831 /* Non fragmented */
2832 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2833
73d80deb 2834 skb_queue_tail(queue, skb);
1da177e4
LT
2835 } else {
2836 /* Fragmented */
2837 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2838
2839 skb_shinfo(skb)->frag_list = NULL;
2840
2841 /* Queue all fragments atomically */
af3e6359 2842 spin_lock(&queue->lock);
1da177e4 2843
73d80deb 2844 __skb_queue_tail(queue, skb);
e702112f
AE
2845
2846 flags &= ~ACL_START;
2847 flags |= ACL_CONT;
1da177e4
LT
2848 do {
2849 skb = list; list = list->next;
8e87d142 2850
1da177e4 2851 skb->dev = (void *) hdev;
0d48d939 2852 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 2853 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
2854
2855 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2856
73d80deb 2857 __skb_queue_tail(queue, skb);
1da177e4
LT
2858 } while (list);
2859
af3e6359 2860 spin_unlock(&queue->lock);
1da177e4 2861 }
73d80deb
LAD
2862}
2863
2864void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2865{
ee22be7e 2866 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 2867
f0e09510 2868 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb
LAD
2869
2870 skb->dev = (void *) hdev;
73d80deb 2871
ee22be7e 2872 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 2873
3eff45ea 2874 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 2875}
1da177e4
LT
2876
2877/* Send SCO data */
0d861d8b 2878void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
2879{
2880 struct hci_dev *hdev = conn->hdev;
2881 struct hci_sco_hdr hdr;
2882
2883 BT_DBG("%s len %d", hdev->name, skb->len);
2884
aca3192c 2885 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
2886 hdr.dlen = skb->len;
2887
badff6d0
ACM
2888 skb_push(skb, HCI_SCO_HDR_SIZE);
2889 skb_reset_transport_header(skb);
9c70220b 2890 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4
LT
2891
2892 skb->dev = (void *) hdev;
0d48d939 2893 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 2894
1da177e4 2895 skb_queue_tail(&conn->data_q, skb);
3eff45ea 2896 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 2897}
1da177e4
LT
2898
2899/* ---- HCI TX task (outgoing data) ---- */
2900
2901/* HCI Connection scheduler */
6039aa73
GP
2902static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2903 int *quote)
1da177e4
LT
2904{
2905 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2906 struct hci_conn *conn = NULL, *c;
abc5de8f 2907 unsigned int num = 0, min = ~0;
1da177e4 2908
8e87d142 2909 /* We don't have to lock device here. Connections are always
1da177e4 2910 * added and removed with TX task disabled. */
bf4c6325
GP
2911
2912 rcu_read_lock();
2913
2914 list_for_each_entry_rcu(c, &h->list, list) {
769be974 2915 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 2916 continue;
769be974
MH
2917
2918 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2919 continue;
2920
1da177e4
LT
2921 num++;
2922
2923 if (c->sent < min) {
2924 min = c->sent;
2925 conn = c;
2926 }
52087a79
LAD
2927
2928 if (hci_conn_num(hdev, type) == num)
2929 break;
1da177e4
LT
2930 }
2931
bf4c6325
GP
2932 rcu_read_unlock();
2933
1da177e4 2934 if (conn) {
6ed58ec5
VT
2935 int cnt, q;
2936
2937 switch (conn->type) {
2938 case ACL_LINK:
2939 cnt = hdev->acl_cnt;
2940 break;
2941 case SCO_LINK:
2942 case ESCO_LINK:
2943 cnt = hdev->sco_cnt;
2944 break;
2945 case LE_LINK:
2946 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2947 break;
2948 default:
2949 cnt = 0;
2950 BT_ERR("Unknown link type");
2951 }
2952
2953 q = cnt / num;
1da177e4
LT
2954 *quote = q ? q : 1;
2955 } else
2956 *quote = 0;
2957
2958 BT_DBG("conn %p quote %d", conn, *quote);
2959 return conn;
2960}
2961
6039aa73 2962static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
2963{
2964 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2965 struct hci_conn *c;
1da177e4 2966
bae1f5d9 2967 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 2968
bf4c6325
GP
2969 rcu_read_lock();
2970
1da177e4 2971 /* Kill stalled connections */
bf4c6325 2972 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 2973 if (c->type == type && c->sent) {
6ed93dc6
AE
2974 BT_ERR("%s killing stalled connection %pMR",
2975 hdev->name, &c->dst);
bed71748 2976 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
2977 }
2978 }
bf4c6325
GP
2979
2980 rcu_read_unlock();
1da177e4
LT
2981}
2982
6039aa73
GP
2983static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2984 int *quote)
1da177e4 2985{
73d80deb
LAD
2986 struct hci_conn_hash *h = &hdev->conn_hash;
2987 struct hci_chan *chan = NULL;
abc5de8f 2988 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 2989 struct hci_conn *conn;
73d80deb
LAD
2990 int cnt, q, conn_num = 0;
2991
2992 BT_DBG("%s", hdev->name);
2993
bf4c6325
GP
2994 rcu_read_lock();
2995
2996 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
2997 struct hci_chan *tmp;
2998
2999 if (conn->type != type)
3000 continue;
3001
3002 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3003 continue;
3004
3005 conn_num++;
3006
8192edef 3007 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
3008 struct sk_buff *skb;
3009
3010 if (skb_queue_empty(&tmp->data_q))
3011 continue;
3012
3013 skb = skb_peek(&tmp->data_q);
3014 if (skb->priority < cur_prio)
3015 continue;
3016
3017 if (skb->priority > cur_prio) {
3018 num = 0;
3019 min = ~0;
3020 cur_prio = skb->priority;
3021 }
3022
3023 num++;
3024
3025 if (conn->sent < min) {
3026 min = conn->sent;
3027 chan = tmp;
3028 }
3029 }
3030
3031 if (hci_conn_num(hdev, type) == conn_num)
3032 break;
3033 }
3034
bf4c6325
GP
3035 rcu_read_unlock();
3036
73d80deb
LAD
3037 if (!chan)
3038 return NULL;
3039
3040 switch (chan->conn->type) {
3041 case ACL_LINK:
3042 cnt = hdev->acl_cnt;
3043 break;
bd1eb66b
AE
3044 case AMP_LINK:
3045 cnt = hdev->block_cnt;
3046 break;
73d80deb
LAD
3047 case SCO_LINK:
3048 case ESCO_LINK:
3049 cnt = hdev->sco_cnt;
3050 break;
3051 case LE_LINK:
3052 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3053 break;
3054 default:
3055 cnt = 0;
3056 BT_ERR("Unknown link type");
3057 }
3058
3059 q = cnt / num;
3060 *quote = q ? q : 1;
3061 BT_DBG("chan %p quote %d", chan, *quote);
3062 return chan;
3063}
3064
02b20f0b
LAD
3065static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3066{
3067 struct hci_conn_hash *h = &hdev->conn_hash;
3068 struct hci_conn *conn;
3069 int num = 0;
3070
3071 BT_DBG("%s", hdev->name);
3072
bf4c6325
GP
3073 rcu_read_lock();
3074
3075 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
3076 struct hci_chan *chan;
3077
3078 if (conn->type != type)
3079 continue;
3080
3081 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3082 continue;
3083
3084 num++;
3085
8192edef 3086 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
3087 struct sk_buff *skb;
3088
3089 if (chan->sent) {
3090 chan->sent = 0;
3091 continue;
3092 }
3093
3094 if (skb_queue_empty(&chan->data_q))
3095 continue;
3096
3097 skb = skb_peek(&chan->data_q);
3098 if (skb->priority >= HCI_PRIO_MAX - 1)
3099 continue;
3100
3101 skb->priority = HCI_PRIO_MAX - 1;
3102
3103 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 3104 skb->priority);
02b20f0b
LAD
3105 }
3106
3107 if (hci_conn_num(hdev, type) == num)
3108 break;
3109 }
bf4c6325
GP
3110
3111 rcu_read_unlock();
3112
02b20f0b
LAD
3113}
3114
b71d385a
AE
3115static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3116{
3117 /* Calculate count of blocks used by this packet */
3118 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3119}
3120
6039aa73 3121static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 3122{
1da177e4
LT
3123 if (!test_bit(HCI_RAW, &hdev->flags)) {
3124 /* ACL tx timeout must be longer than maximum
3125 * link supervision timeout (40.9 seconds) */
63d2bc1b 3126 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 3127 HCI_ACL_TX_TIMEOUT))
bae1f5d9 3128 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 3129 }
63d2bc1b 3130}
1da177e4 3131
6039aa73 3132static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
3133{
3134 unsigned int cnt = hdev->acl_cnt;
3135 struct hci_chan *chan;
3136 struct sk_buff *skb;
3137 int quote;
3138
3139 __check_timeout(hdev, cnt);
04837f64 3140
73d80deb 3141 while (hdev->acl_cnt &&
a8c5fb1a 3142 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
3143 u32 priority = (skb_peek(&chan->data_q))->priority;
3144 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 3145 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3146 skb->len, skb->priority);
73d80deb 3147
ec1cce24
LAD
3148 /* Stop if priority has changed */
3149 if (skb->priority < priority)
3150 break;
3151
3152 skb = skb_dequeue(&chan->data_q);
3153
73d80deb 3154 hci_conn_enter_active_mode(chan->conn,
04124681 3155 bt_cb(skb)->force_active);
04837f64 3156
1da177e4
LT
3157 hci_send_frame(skb);
3158 hdev->acl_last_tx = jiffies;
3159
3160 hdev->acl_cnt--;
73d80deb
LAD
3161 chan->sent++;
3162 chan->conn->sent++;
1da177e4
LT
3163 }
3164 }
02b20f0b
LAD
3165
3166 if (cnt != hdev->acl_cnt)
3167 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
3168}
3169
6039aa73 3170static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 3171{
63d2bc1b 3172 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
3173 struct hci_chan *chan;
3174 struct sk_buff *skb;
3175 int quote;
bd1eb66b 3176 u8 type;
b71d385a 3177
63d2bc1b 3178 __check_timeout(hdev, cnt);
b71d385a 3179
bd1eb66b
AE
3180 BT_DBG("%s", hdev->name);
3181
3182 if (hdev->dev_type == HCI_AMP)
3183 type = AMP_LINK;
3184 else
3185 type = ACL_LINK;
3186
b71d385a 3187 while (hdev->block_cnt > 0 &&
bd1eb66b 3188 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
3189 u32 priority = (skb_peek(&chan->data_q))->priority;
3190 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3191 int blocks;
3192
3193 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3194 skb->len, skb->priority);
b71d385a
AE
3195
3196 /* Stop if priority has changed */
3197 if (skb->priority < priority)
3198 break;
3199
3200 skb = skb_dequeue(&chan->data_q);
3201
3202 blocks = __get_blocks(hdev, skb);
3203 if (blocks > hdev->block_cnt)
3204 return;
3205
3206 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 3207 bt_cb(skb)->force_active);
b71d385a
AE
3208
3209 hci_send_frame(skb);
3210 hdev->acl_last_tx = jiffies;
3211
3212 hdev->block_cnt -= blocks;
3213 quote -= blocks;
3214
3215 chan->sent += blocks;
3216 chan->conn->sent += blocks;
3217 }
3218 }
3219
3220 if (cnt != hdev->block_cnt)
bd1eb66b 3221 hci_prio_recalculate(hdev, type);
b71d385a
AE
3222}
3223
6039aa73 3224static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
3225{
3226 BT_DBG("%s", hdev->name);
3227
bd1eb66b
AE
3228 /* No ACL link over BR/EDR controller */
3229 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3230 return;
3231
3232 /* No AMP link over AMP controller */
3233 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
3234 return;
3235
3236 switch (hdev->flow_ctl_mode) {
3237 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3238 hci_sched_acl_pkt(hdev);
3239 break;
3240
3241 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3242 hci_sched_acl_blk(hdev);
3243 break;
3244 }
3245}
3246
1da177e4 3247/* Schedule SCO */
6039aa73 3248static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
3249{
3250 struct hci_conn *conn;
3251 struct sk_buff *skb;
3252 int quote;
3253
3254 BT_DBG("%s", hdev->name);
3255
52087a79
LAD
3256 if (!hci_conn_num(hdev, SCO_LINK))
3257 return;
3258
1da177e4
LT
3259 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3260 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3261 BT_DBG("skb %p len %d", skb, skb->len);
3262 hci_send_frame(skb);
3263
3264 conn->sent++;
3265 if (conn->sent == ~0)
3266 conn->sent = 0;
3267 }
3268 }
3269}
3270
6039aa73 3271static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
3272{
3273 struct hci_conn *conn;
3274 struct sk_buff *skb;
3275 int quote;
3276
3277 BT_DBG("%s", hdev->name);
3278
52087a79
LAD
3279 if (!hci_conn_num(hdev, ESCO_LINK))
3280 return;
3281
8fc9ced3
GP
3282 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3283 &quote))) {
b6a0dc82
MH
3284 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3285 BT_DBG("skb %p len %d", skb, skb->len);
3286 hci_send_frame(skb);
3287
3288 conn->sent++;
3289 if (conn->sent == ~0)
3290 conn->sent = 0;
3291 }
3292 }
3293}
3294
6039aa73 3295static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 3296{
73d80deb 3297 struct hci_chan *chan;
6ed58ec5 3298 struct sk_buff *skb;
02b20f0b 3299 int quote, cnt, tmp;
6ed58ec5
VT
3300
3301 BT_DBG("%s", hdev->name);
3302
52087a79
LAD
3303 if (!hci_conn_num(hdev, LE_LINK))
3304 return;
3305
6ed58ec5
VT
3306 if (!test_bit(HCI_RAW, &hdev->flags)) {
3307 /* LE tx timeout must be longer than maximum
3308 * link supervision timeout (40.9 seconds) */
bae1f5d9 3309 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 3310 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 3311 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
3312 }
3313
3314 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 3315 tmp = cnt;
73d80deb 3316 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
3317 u32 priority = (skb_peek(&chan->data_q))->priority;
3318 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 3319 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3320 skb->len, skb->priority);
6ed58ec5 3321
ec1cce24
LAD
3322 /* Stop if priority has changed */
3323 if (skb->priority < priority)
3324 break;
3325
3326 skb = skb_dequeue(&chan->data_q);
3327
6ed58ec5
VT
3328 hci_send_frame(skb);
3329 hdev->le_last_tx = jiffies;
3330
3331 cnt--;
73d80deb
LAD
3332 chan->sent++;
3333 chan->conn->sent++;
6ed58ec5
VT
3334 }
3335 }
73d80deb 3336
6ed58ec5
VT
3337 if (hdev->le_pkts)
3338 hdev->le_cnt = cnt;
3339 else
3340 hdev->acl_cnt = cnt;
02b20f0b
LAD
3341
3342 if (cnt != tmp)
3343 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
3344}
3345
3eff45ea 3346static void hci_tx_work(struct work_struct *work)
1da177e4 3347{
3eff45ea 3348 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
3349 struct sk_buff *skb;
3350
6ed58ec5 3351 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 3352 hdev->sco_cnt, hdev->le_cnt);
1da177e4 3353
52de599e
MH
3354 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3355 /* Schedule queues and send stuff to HCI driver */
3356 hci_sched_acl(hdev);
3357 hci_sched_sco(hdev);
3358 hci_sched_esco(hdev);
3359 hci_sched_le(hdev);
3360 }
6ed58ec5 3361
1da177e4
LT
3362 /* Send next queued raw (unknown type) packet */
3363 while ((skb = skb_dequeue(&hdev->raw_q)))
3364 hci_send_frame(skb);
1da177e4
LT
3365}
3366
25985edc 3367/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
3368
3369/* ACL data packet */
6039aa73 3370static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
3371{
3372 struct hci_acl_hdr *hdr = (void *) skb->data;
3373 struct hci_conn *conn;
3374 __u16 handle, flags;
3375
3376 skb_pull(skb, HCI_ACL_HDR_SIZE);
3377
3378 handle = __le16_to_cpu(hdr->handle);
3379 flags = hci_flags(handle);
3380 handle = hci_handle(handle);
3381
f0e09510 3382 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 3383 handle, flags);
1da177e4
LT
3384
3385 hdev->stat.acl_rx++;
3386
3387 hci_dev_lock(hdev);
3388 conn = hci_conn_hash_lookup_handle(hdev, handle);
3389 hci_dev_unlock(hdev);
8e87d142 3390
1da177e4 3391 if (conn) {
65983fc7 3392 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 3393
1da177e4 3394 /* Send to upper protocol */
686ebf28
UF
3395 l2cap_recv_acldata(conn, skb, flags);
3396 return;
1da177e4 3397 } else {
8e87d142 3398 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 3399 hdev->name, handle);
1da177e4
LT
3400 }
3401
3402 kfree_skb(skb);
3403}
3404
3405/* SCO data packet */
6039aa73 3406static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
3407{
3408 struct hci_sco_hdr *hdr = (void *) skb->data;
3409 struct hci_conn *conn;
3410 __u16 handle;
3411
3412 skb_pull(skb, HCI_SCO_HDR_SIZE);
3413
3414 handle = __le16_to_cpu(hdr->handle);
3415
f0e09510 3416 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
3417
3418 hdev->stat.sco_rx++;
3419
3420 hci_dev_lock(hdev);
3421 conn = hci_conn_hash_lookup_handle(hdev, handle);
3422 hci_dev_unlock(hdev);
3423
3424 if (conn) {
1da177e4 3425 /* Send to upper protocol */
686ebf28
UF
3426 sco_recv_scodata(conn, skb);
3427 return;
1da177e4 3428 } else {
8e87d142 3429 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 3430 hdev->name, handle);
1da177e4
LT
3431 }
3432
3433 kfree_skb(skb);
3434}
3435
9238f36a
JH
3436static bool hci_req_is_complete(struct hci_dev *hdev)
3437{
3438 struct sk_buff *skb;
3439
3440 skb = skb_peek(&hdev->cmd_q);
3441 if (!skb)
3442 return true;
3443
3444 return bt_cb(skb)->req.start;
3445}
3446
42c6b129
JH
3447static void hci_resend_last(struct hci_dev *hdev)
3448{
3449 struct hci_command_hdr *sent;
3450 struct sk_buff *skb;
3451 u16 opcode;
3452
3453 if (!hdev->sent_cmd)
3454 return;
3455
3456 sent = (void *) hdev->sent_cmd->data;
3457 opcode = __le16_to_cpu(sent->opcode);
3458 if (opcode == HCI_OP_RESET)
3459 return;
3460
3461 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3462 if (!skb)
3463 return;
3464
3465 skb_queue_head(&hdev->cmd_q, skb);
3466 queue_work(hdev->workqueue, &hdev->cmd_work);
3467}
3468
9238f36a
JH
3469void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3470{
3471 hci_req_complete_t req_complete = NULL;
3472 struct sk_buff *skb;
3473 unsigned long flags;
3474
3475 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3476
42c6b129
JH
3477 /* If the completed command doesn't match the last one that was
3478 * sent we need to do special handling of it.
9238f36a 3479 */
42c6b129
JH
3480 if (!hci_sent_cmd_data(hdev, opcode)) {
3481 /* Some CSR based controllers generate a spontaneous
3482 * reset complete event during init and any pending
3483 * command will never be completed. In such a case we
3484 * need to resend whatever was the last sent
3485 * command.
3486 */
3487 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3488 hci_resend_last(hdev);
3489
9238f36a 3490 return;
42c6b129 3491 }
9238f36a
JH
3492
3493 /* If the command succeeded and there's still more commands in
3494 * this request the request is not yet complete.
3495 */
3496 if (!status && !hci_req_is_complete(hdev))
3497 return;
3498
3499 /* If this was the last command in a request the complete
3500 * callback would be found in hdev->sent_cmd instead of the
3501 * command queue (hdev->cmd_q).
3502 */
3503 if (hdev->sent_cmd) {
3504 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
53e21fbc
JH
3505
3506 if (req_complete) {
3507 /* We must set the complete callback to NULL to
3508 * avoid calling the callback more than once if
3509 * this function gets called again.
3510 */
3511 bt_cb(hdev->sent_cmd)->req.complete = NULL;
3512
9238f36a 3513 goto call_complete;
53e21fbc 3514 }
9238f36a
JH
3515 }
3516
3517 /* Remove all pending commands belonging to this request */
3518 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3519 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3520 if (bt_cb(skb)->req.start) {
3521 __skb_queue_head(&hdev->cmd_q, skb);
3522 break;
3523 }
3524
3525 req_complete = bt_cb(skb)->req.complete;
3526 kfree_skb(skb);
3527 }
3528 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3529
3530call_complete:
3531 if (req_complete)
3532 req_complete(hdev, status);
3533}
3534
b78752cc 3535static void hci_rx_work(struct work_struct *work)
1da177e4 3536{
b78752cc 3537 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
3538 struct sk_buff *skb;
3539
3540 BT_DBG("%s", hdev->name);
3541
1da177e4 3542 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
3543 /* Send copy to monitor */
3544 hci_send_to_monitor(hdev, skb);
3545
1da177e4
LT
3546 if (atomic_read(&hdev->promisc)) {
3547 /* Send copy to the sockets */
470fe1b5 3548 hci_send_to_sock(hdev, skb);
1da177e4
LT
3549 }
3550
0736cfa8
MH
3551 if (test_bit(HCI_RAW, &hdev->flags) ||
3552 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1da177e4
LT
3553 kfree_skb(skb);
3554 continue;
3555 }
3556
3557 if (test_bit(HCI_INIT, &hdev->flags)) {
3558 /* Don't process data packets in this states. */
0d48d939 3559 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
3560 case HCI_ACLDATA_PKT:
3561 case HCI_SCODATA_PKT:
3562 kfree_skb(skb);
3563 continue;
3ff50b79 3564 }
1da177e4
LT
3565 }
3566
3567 /* Process frame */
0d48d939 3568 switch (bt_cb(skb)->pkt_type) {
1da177e4 3569 case HCI_EVENT_PKT:
b78752cc 3570 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
3571 hci_event_packet(hdev, skb);
3572 break;
3573
3574 case HCI_ACLDATA_PKT:
3575 BT_DBG("%s ACL data packet", hdev->name);
3576 hci_acldata_packet(hdev, skb);
3577 break;
3578
3579 case HCI_SCODATA_PKT:
3580 BT_DBG("%s SCO data packet", hdev->name);
3581 hci_scodata_packet(hdev, skb);
3582 break;
3583
3584 default:
3585 kfree_skb(skb);
3586 break;
3587 }
3588 }
1da177e4
LT
3589}
3590
c347b765 3591static void hci_cmd_work(struct work_struct *work)
1da177e4 3592{
c347b765 3593 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
3594 struct sk_buff *skb;
3595
2104786b
AE
3596 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3597 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 3598
1da177e4 3599 /* Send queued commands */
5a08ecce
AE
3600 if (atomic_read(&hdev->cmd_cnt)) {
3601 skb = skb_dequeue(&hdev->cmd_q);
3602 if (!skb)
3603 return;
3604
7585b97a 3605 kfree_skb(hdev->sent_cmd);
1da177e4 3606
a675d7f1 3607 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 3608 if (hdev->sent_cmd) {
1da177e4
LT
3609 atomic_dec(&hdev->cmd_cnt);
3610 hci_send_frame(skb);
7bdb8a5c
SJ
3611 if (test_bit(HCI_RESET, &hdev->flags))
3612 del_timer(&hdev->cmd_timer);
3613 else
3614 mod_timer(&hdev->cmd_timer,
5f246e89 3615 jiffies + HCI_CMD_TIMEOUT);
1da177e4
LT
3616 } else {
3617 skb_queue_head(&hdev->cmd_q, skb);
c347b765 3618 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
3619 }
3620 }
3621}
2519a1fc 3622
31f7956c
AG
3623u8 bdaddr_to_le(u8 bdaddr_type)
3624{
3625 switch (bdaddr_type) {
3626 case BDADDR_LE_PUBLIC:
3627 return ADDR_LE_DEV_PUBLIC;
3628
3629 default:
3630 /* Fallback to LE Random address type */
3631 return ADDR_LE_DEV_RANDOM;
3632 }
3633}
This page took 0.906655 seconds and 5 git commands to generate.