Merge branch 'misc' of git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild
[deliverable/linux.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
8c520a59 28#include <linux/export.h>
3df92b31 29#include <linux/idr.h>
1da177e4 30
8c520a59 31#include <linux/rfkill.h>
1da177e4
LT
32
33#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h>
35
b78752cc 36static void hci_rx_work(struct work_struct *work);
c347b765 37static void hci_cmd_work(struct work_struct *work);
3eff45ea 38static void hci_tx_work(struct work_struct *work);
1da177e4 39
1da177e4
LT
40/* HCI device list */
41LIST_HEAD(hci_dev_list);
42DEFINE_RWLOCK(hci_dev_list_lock);
43
44/* HCI callback list */
45LIST_HEAD(hci_cb_list);
46DEFINE_RWLOCK(hci_cb_list_lock);
47
3df92b31
SL
48/* HCI ID Numbering */
49static DEFINE_IDA(hci_index_ida);
50
1da177e4
LT
51/* ---- HCI notifications ---- */
52
/* Notify the HCI socket layer about a device event so monitoring
 * sockets see device state changes.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
57
58/* ---- HCI requests ---- */
59
42c6b129 60static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
1da177e4 61{
42c6b129 62 BT_DBG("%s result 0x%2.2x", hdev->name, result);
1da177e4
LT
63
64 if (hdev->req_status == HCI_REQ_PEND) {
65 hdev->req_result = result;
66 hdev->req_status = HCI_REQ_DONE;
67 wake_up_interruptible(&hdev->req_wait_q);
68 }
69}
70
71static void hci_req_cancel(struct hci_dev *hdev, int err)
72{
73 BT_DBG("%s err 0x%2.2x", hdev->name, err);
74
75 if (hdev->req_status == HCI_REQ_PEND) {
76 hdev->req_result = err;
77 hdev->req_status = HCI_REQ_CANCELED;
78 wake_up_interruptible(&hdev->req_wait_q);
79 }
80}
81
77a63e0a
FW
82static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
83 u8 event)
75e84b7c
JH
84{
85 struct hci_ev_cmd_complete *ev;
86 struct hci_event_hdr *hdr;
87 struct sk_buff *skb;
88
89 hci_dev_lock(hdev);
90
91 skb = hdev->recv_evt;
92 hdev->recv_evt = NULL;
93
94 hci_dev_unlock(hdev);
95
96 if (!skb)
97 return ERR_PTR(-ENODATA);
98
99 if (skb->len < sizeof(*hdr)) {
100 BT_ERR("Too short HCI event");
101 goto failed;
102 }
103
104 hdr = (void *) skb->data;
105 skb_pull(skb, HCI_EVENT_HDR_SIZE);
106
7b1abbbe
JH
107 if (event) {
108 if (hdr->evt != event)
109 goto failed;
110 return skb;
111 }
112
75e84b7c
JH
113 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
114 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
115 goto failed;
116 }
117
118 if (skb->len < sizeof(*ev)) {
119 BT_ERR("Too short cmd_complete event");
120 goto failed;
121 }
122
123 ev = (void *) skb->data;
124 skb_pull(skb, sizeof(*ev));
125
126 if (opcode == __le16_to_cpu(ev->opcode))
127 return skb;
128
129 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
130 __le16_to_cpu(ev->opcode));
131
132failed:
133 kfree_skb(skb);
134 return ERR_PTR(-ENODATA);
135}
136
7b1abbbe 137struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
07dc93dd 138 const void *param, u8 event, u32 timeout)
75e84b7c
JH
139{
140 DECLARE_WAITQUEUE(wait, current);
141 struct hci_request req;
142 int err = 0;
143
144 BT_DBG("%s", hdev->name);
145
146 hci_req_init(&req, hdev);
147
7b1abbbe 148 hci_req_add_ev(&req, opcode, plen, param, event);
75e84b7c
JH
149
150 hdev->req_status = HCI_REQ_PEND;
151
152 err = hci_req_run(&req, hci_req_sync_complete);
153 if (err < 0)
154 return ERR_PTR(err);
155
156 add_wait_queue(&hdev->req_wait_q, &wait);
157 set_current_state(TASK_INTERRUPTIBLE);
158
159 schedule_timeout(timeout);
160
161 remove_wait_queue(&hdev->req_wait_q, &wait);
162
163 if (signal_pending(current))
164 return ERR_PTR(-EINTR);
165
166 switch (hdev->req_status) {
167 case HCI_REQ_DONE:
168 err = -bt_to_errno(hdev->req_result);
169 break;
170
171 case HCI_REQ_CANCELED:
172 err = -hdev->req_result;
173 break;
174
175 default:
176 err = -ETIMEDOUT;
177 break;
178 }
179
180 hdev->req_status = hdev->req_result = 0;
181
182 BT_DBG("%s end: err %d", hdev->name, err);
183
184 if (err < 0)
185 return ERR_PTR(err);
186
7b1abbbe
JH
187 return hci_get_cmd_complete(hdev, opcode, event);
188}
189EXPORT_SYMBOL(__hci_cmd_sync_ev);
190
191struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
07dc93dd 192 const void *param, u32 timeout)
7b1abbbe
JH
193{
194 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
75e84b7c
JH
195}
196EXPORT_SYMBOL(__hci_cmd_sync);
197
1da177e4 198/* Execute request and wait for completion. */
01178cd4 199static int __hci_req_sync(struct hci_dev *hdev,
42c6b129
JH
200 void (*func)(struct hci_request *req,
201 unsigned long opt),
01178cd4 202 unsigned long opt, __u32 timeout)
1da177e4 203{
42c6b129 204 struct hci_request req;
1da177e4
LT
205 DECLARE_WAITQUEUE(wait, current);
206 int err = 0;
207
208 BT_DBG("%s start", hdev->name);
209
42c6b129
JH
210 hci_req_init(&req, hdev);
211
1da177e4
LT
212 hdev->req_status = HCI_REQ_PEND;
213
42c6b129 214 func(&req, opt);
53cce22d 215
42c6b129
JH
216 err = hci_req_run(&req, hci_req_sync_complete);
217 if (err < 0) {
53cce22d 218 hdev->req_status = 0;
920c8300
AG
219
220 /* ENODATA means the HCI request command queue is empty.
221 * This can happen when a request with conditionals doesn't
222 * trigger any commands to be sent. This is normal behavior
223 * and should not trigger an error return.
42c6b129 224 */
920c8300
AG
225 if (err == -ENODATA)
226 return 0;
227
228 return err;
53cce22d
JH
229 }
230
bc4445c7
AG
231 add_wait_queue(&hdev->req_wait_q, &wait);
232 set_current_state(TASK_INTERRUPTIBLE);
233
1da177e4
LT
234 schedule_timeout(timeout);
235
236 remove_wait_queue(&hdev->req_wait_q, &wait);
237
238 if (signal_pending(current))
239 return -EINTR;
240
241 switch (hdev->req_status) {
242 case HCI_REQ_DONE:
e175072f 243 err = -bt_to_errno(hdev->req_result);
1da177e4
LT
244 break;
245
246 case HCI_REQ_CANCELED:
247 err = -hdev->req_result;
248 break;
249
250 default:
251 err = -ETIMEDOUT;
252 break;
3ff50b79 253 }
1da177e4 254
a5040efa 255 hdev->req_status = hdev->req_result = 0;
1da177e4
LT
256
257 BT_DBG("%s end: err %d", hdev->name, err);
258
259 return err;
260}
261
01178cd4 262static int hci_req_sync(struct hci_dev *hdev,
42c6b129
JH
263 void (*req)(struct hci_request *req,
264 unsigned long opt),
01178cd4 265 unsigned long opt, __u32 timeout)
1da177e4
LT
266{
267 int ret;
268
7c6a329e
MH
269 if (!test_bit(HCI_UP, &hdev->flags))
270 return -ENETDOWN;
271
1da177e4
LT
272 /* Serialize all requests */
273 hci_req_lock(hdev);
01178cd4 274 ret = __hci_req_sync(hdev, req, opt, timeout);
1da177e4
LT
275 hci_req_unlock(hdev);
276
277 return ret;
278}
279
42c6b129 280static void hci_reset_req(struct hci_request *req, unsigned long opt)
1da177e4 281{
42c6b129 282 BT_DBG("%s %ld", req->hdev->name, opt);
1da177e4
LT
283
284 /* Reset device */
42c6b129
JH
285 set_bit(HCI_RESET, &req->hdev->flags);
286 hci_req_add(req, HCI_OP_RESET, 0, NULL);
1da177e4
LT
287}
288
42c6b129 289static void bredr_init(struct hci_request *req)
1da177e4 290{
42c6b129 291 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
2455a3ea 292
1da177e4 293 /* Read Local Supported Features */
42c6b129 294 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1da177e4 295
1143e5a6 296 /* Read Local Version */
42c6b129 297 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
2177bab5
JH
298
299 /* Read BD Address */
42c6b129 300 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1da177e4
LT
301}
302
42c6b129 303static void amp_init(struct hci_request *req)
e61ef499 304{
42c6b129 305 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
2455a3ea 306
e61ef499 307 /* Read Local Version */
42c6b129 308 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
6bcbc489
AE
309
310 /* Read Local AMP Info */
42c6b129 311 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
e71dfaba
AE
312
313 /* Read Data Blk size */
42c6b129 314 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
e61ef499
AE
315}
316
42c6b129 317static void hci_init1_req(struct hci_request *req, unsigned long opt)
e61ef499 318{
42c6b129 319 struct hci_dev *hdev = req->hdev;
e61ef499
AE
320
321 BT_DBG("%s %ld", hdev->name, opt);
322
11778716
AE
323 /* Reset */
324 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
42c6b129 325 hci_reset_req(req, 0);
11778716 326
e61ef499
AE
327 switch (hdev->dev_type) {
328 case HCI_BREDR:
42c6b129 329 bredr_init(req);
e61ef499
AE
330 break;
331
332 case HCI_AMP:
42c6b129 333 amp_init(req);
e61ef499
AE
334 break;
335
336 default:
337 BT_ERR("Unknown device type %d", hdev->dev_type);
338 break;
339 }
e61ef499
AE
340}
341
42c6b129 342static void bredr_setup(struct hci_request *req)
2177bab5 343{
2177bab5
JH
344 __le16 param;
345 __u8 flt_type;
346
347 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
42c6b129 348 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
2177bab5
JH
349
350 /* Read Class of Device */
42c6b129 351 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
2177bab5
JH
352
353 /* Read Local Name */
42c6b129 354 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
2177bab5
JH
355
356 /* Read Voice Setting */
42c6b129 357 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
2177bab5
JH
358
359 /* Clear Event Filters */
360 flt_type = HCI_FLT_CLEAR_ALL;
42c6b129 361 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
2177bab5
JH
362
363 /* Connection accept timeout ~20 secs */
364 param = __constant_cpu_to_le16(0x7d00);
42c6b129 365 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
2177bab5 366
f332ec66
JH
367 /* Read page scan parameters */
368 if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
369 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
370 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
371 }
2177bab5
JH
372}
373
42c6b129 374static void le_setup(struct hci_request *req)
2177bab5 375{
c73eee91
JH
376 struct hci_dev *hdev = req->hdev;
377
2177bab5 378 /* Read LE Buffer Size */
42c6b129 379 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
2177bab5
JH
380
381 /* Read LE Local Supported Features */
42c6b129 382 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
2177bab5
JH
383
384 /* Read LE Advertising Channel TX Power */
42c6b129 385 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
2177bab5
JH
386
387 /* Read LE White List Size */
42c6b129 388 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
2177bab5
JH
389
390 /* Read LE Supported States */
42c6b129 391 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
c73eee91
JH
392
393 /* LE-only controllers have LE implicitly enabled */
394 if (!lmp_bredr_capable(hdev))
395 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
2177bab5
JH
396}
397
398static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
399{
400 if (lmp_ext_inq_capable(hdev))
401 return 0x02;
402
403 if (lmp_inq_rssi_capable(hdev))
404 return 0x01;
405
406 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
407 hdev->lmp_subver == 0x0757)
408 return 0x01;
409
410 if (hdev->manufacturer == 15) {
411 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
412 return 0x01;
413 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
414 return 0x01;
415 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
416 return 0x01;
417 }
418
419 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
420 hdev->lmp_subver == 0x1805)
421 return 0x01;
422
423 return 0x00;
424}
425
42c6b129 426static void hci_setup_inquiry_mode(struct hci_request *req)
2177bab5
JH
427{
428 u8 mode;
429
42c6b129 430 mode = hci_get_inquiry_mode(req->hdev);
2177bab5 431
42c6b129 432 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
2177bab5
JH
433}
434
42c6b129 435static void hci_setup_event_mask(struct hci_request *req)
2177bab5 436{
42c6b129
JH
437 struct hci_dev *hdev = req->hdev;
438
2177bab5
JH
439 /* The second byte is 0xff instead of 0x9f (two reserved bits
440 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
441 * command otherwise.
442 */
443 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
444
445 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
446 * any event mask for pre 1.2 devices.
447 */
448 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
449 return;
450
451 if (lmp_bredr_capable(hdev)) {
452 events[4] |= 0x01; /* Flow Specification Complete */
453 events[4] |= 0x02; /* Inquiry Result with RSSI */
454 events[4] |= 0x04; /* Read Remote Extended Features Complete */
455 events[5] |= 0x08; /* Synchronous Connection Complete */
456 events[5] |= 0x10; /* Synchronous Connection Changed */
457 }
458
459 if (lmp_inq_rssi_capable(hdev))
460 events[4] |= 0x02; /* Inquiry Result with RSSI */
461
462 if (lmp_sniffsubr_capable(hdev))
463 events[5] |= 0x20; /* Sniff Subrating */
464
465 if (lmp_pause_enc_capable(hdev))
466 events[5] |= 0x80; /* Encryption Key Refresh Complete */
467
468 if (lmp_ext_inq_capable(hdev))
469 events[5] |= 0x40; /* Extended Inquiry Result */
470
471 if (lmp_no_flush_capable(hdev))
472 events[7] |= 0x01; /* Enhanced Flush Complete */
473
474 if (lmp_lsto_capable(hdev))
475 events[6] |= 0x80; /* Link Supervision Timeout Changed */
476
477 if (lmp_ssp_capable(hdev)) {
478 events[6] |= 0x01; /* IO Capability Request */
479 events[6] |= 0x02; /* IO Capability Response */
480 events[6] |= 0x04; /* User Confirmation Request */
481 events[6] |= 0x08; /* User Passkey Request */
482 events[6] |= 0x10; /* Remote OOB Data Request */
483 events[6] |= 0x20; /* Simple Pairing Complete */
484 events[7] |= 0x04; /* User Passkey Notification */
485 events[7] |= 0x08; /* Keypress Notification */
486 events[7] |= 0x10; /* Remote Host Supported
487 * Features Notification
488 */
489 }
490
491 if (lmp_le_capable(hdev))
492 events[7] |= 0x20; /* LE Meta-Event */
493
42c6b129 494 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
2177bab5
JH
495
496 if (lmp_le_capable(hdev)) {
497 memset(events, 0, sizeof(events));
498 events[0] = 0x1f;
42c6b129
JH
499 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
500 sizeof(events), events);
2177bab5
JH
501 }
502}
503
42c6b129 504static void hci_init2_req(struct hci_request *req, unsigned long opt)
2177bab5 505{
42c6b129
JH
506 struct hci_dev *hdev = req->hdev;
507
2177bab5 508 if (lmp_bredr_capable(hdev))
42c6b129 509 bredr_setup(req);
2177bab5
JH
510
511 if (lmp_le_capable(hdev))
42c6b129 512 le_setup(req);
2177bab5 513
42c6b129 514 hci_setup_event_mask(req);
2177bab5
JH
515
516 if (hdev->hci_ver > BLUETOOTH_VER_1_1)
42c6b129 517 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
2177bab5
JH
518
519 if (lmp_ssp_capable(hdev)) {
520 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
521 u8 mode = 0x01;
42c6b129
JH
522 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
523 sizeof(mode), &mode);
2177bab5
JH
524 } else {
525 struct hci_cp_write_eir cp;
526
527 memset(hdev->eir, 0, sizeof(hdev->eir));
528 memset(&cp, 0, sizeof(cp));
529
42c6b129 530 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
2177bab5
JH
531 }
532 }
533
534 if (lmp_inq_rssi_capable(hdev))
42c6b129 535 hci_setup_inquiry_mode(req);
2177bab5
JH
536
537 if (lmp_inq_tx_pwr_capable(hdev))
42c6b129 538 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
2177bab5
JH
539
540 if (lmp_ext_feat_capable(hdev)) {
541 struct hci_cp_read_local_ext_features cp;
542
543 cp.page = 0x01;
42c6b129
JH
544 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
545 sizeof(cp), &cp);
2177bab5
JH
546 }
547
548 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
549 u8 enable = 1;
42c6b129
JH
550 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
551 &enable);
2177bab5
JH
552 }
553}
554
42c6b129 555static void hci_setup_link_policy(struct hci_request *req)
2177bab5 556{
42c6b129 557 struct hci_dev *hdev = req->hdev;
2177bab5
JH
558 struct hci_cp_write_def_link_policy cp;
559 u16 link_policy = 0;
560
561 if (lmp_rswitch_capable(hdev))
562 link_policy |= HCI_LP_RSWITCH;
563 if (lmp_hold_capable(hdev))
564 link_policy |= HCI_LP_HOLD;
565 if (lmp_sniff_capable(hdev))
566 link_policy |= HCI_LP_SNIFF;
567 if (lmp_park_capable(hdev))
568 link_policy |= HCI_LP_PARK;
569
570 cp.policy = cpu_to_le16(link_policy);
42c6b129 571 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
2177bab5
JH
572}
573
42c6b129 574static void hci_set_le_support(struct hci_request *req)
2177bab5 575{
42c6b129 576 struct hci_dev *hdev = req->hdev;
2177bab5
JH
577 struct hci_cp_write_le_host_supported cp;
578
c73eee91
JH
579 /* LE-only devices do not support explicit enablement */
580 if (!lmp_bredr_capable(hdev))
581 return;
582
2177bab5
JH
583 memset(&cp, 0, sizeof(cp));
584
585 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
586 cp.le = 0x01;
587 cp.simul = lmp_le_br_capable(hdev);
588 }
589
590 if (cp.le != lmp_host_le_capable(hdev))
42c6b129
JH
591 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
592 &cp);
2177bab5
JH
593}
594
42c6b129 595static void hci_init3_req(struct hci_request *req, unsigned long opt)
2177bab5 596{
42c6b129 597 struct hci_dev *hdev = req->hdev;
d2c5d77f 598 u8 p;
42c6b129 599
b8f4e068
GP
600 /* Some Broadcom based Bluetooth controllers do not support the
601 * Delete Stored Link Key command. They are clearly indicating its
602 * absence in the bit mask of supported commands.
603 *
604 * Check the supported commands and only if the the command is marked
605 * as supported send it. If not supported assume that the controller
606 * does not have actual support for stored link keys which makes this
607 * command redundant anyway.
608 */
59f45d57
JH
609 if (hdev->commands[6] & 0x80) {
610 struct hci_cp_delete_stored_link_key cp;
611
612 bacpy(&cp.bdaddr, BDADDR_ANY);
613 cp.delete_all = 0x01;
614 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
615 sizeof(cp), &cp);
616 }
617
2177bab5 618 if (hdev->commands[5] & 0x10)
42c6b129 619 hci_setup_link_policy(req);
2177bab5 620
04b4edcb 621 if (lmp_le_capable(hdev)) {
42c6b129 622 hci_set_le_support(req);
04b4edcb
JH
623 hci_update_ad(req);
624 }
d2c5d77f
JH
625
626 /* Read features beyond page 1 if available */
627 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
628 struct hci_cp_read_local_ext_features cp;
629
630 cp.page = p;
631 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
632 sizeof(cp), &cp);
633 }
2177bab5
JH
634}
635
636static int __hci_init(struct hci_dev *hdev)
637{
638 int err;
639
640 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
641 if (err < 0)
642 return err;
643
644 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
645 * BR/EDR/LE type controllers. AMP controllers only need the
646 * first stage init.
647 */
648 if (hdev->dev_type != HCI_BREDR)
649 return 0;
650
651 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
652 if (err < 0)
653 return err;
654
655 return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
656}
657
42c6b129 658static void hci_scan_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
659{
660 __u8 scan = opt;
661
42c6b129 662 BT_DBG("%s %x", req->hdev->name, scan);
1da177e4
LT
663
664 /* Inquiry and Page scans */
42c6b129 665 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
666}
667
42c6b129 668static void hci_auth_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
669{
670 __u8 auth = opt;
671
42c6b129 672 BT_DBG("%s %x", req->hdev->name, auth);
1da177e4
LT
673
674 /* Authentication */
42c6b129 675 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
676}
677
42c6b129 678static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
679{
680 __u8 encrypt = opt;
681
42c6b129 682 BT_DBG("%s %x", req->hdev->name, encrypt);
1da177e4 683
e4e8e37c 684 /* Encryption */
42c6b129 685 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
686}
687
42c6b129 688static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
e4e8e37c
MH
689{
690 __le16 policy = cpu_to_le16(opt);
691
42c6b129 692 BT_DBG("%s %x", req->hdev->name, policy);
e4e8e37c
MH
693
694 /* Default link policy */
42c6b129 695 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
e4e8e37c
MH
696}
697
8e87d142 698/* Get HCI device by index.
1da177e4
LT
699 * Device is held on return. */
700struct hci_dev *hci_dev_get(int index)
701{
8035ded4 702 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
703
704 BT_DBG("%d", index);
705
706 if (index < 0)
707 return NULL;
708
709 read_lock(&hci_dev_list_lock);
8035ded4 710 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
711 if (d->id == index) {
712 hdev = hci_dev_hold(d);
713 break;
714 }
715 }
716 read_unlock(&hci_dev_list_lock);
717 return hdev;
718}
1da177e4
LT
719
720/* ---- Inquiry support ---- */
ff9ef578 721
30dc78e1
JH
722bool hci_discovery_active(struct hci_dev *hdev)
723{
724 struct discovery_state *discov = &hdev->discovery;
725
6fbe195d 726 switch (discov->state) {
343f935b 727 case DISCOVERY_FINDING:
6fbe195d 728 case DISCOVERY_RESOLVING:
30dc78e1
JH
729 return true;
730
6fbe195d
AG
731 default:
732 return false;
733 }
30dc78e1
JH
734}
735
ff9ef578
JH
736void hci_discovery_set_state(struct hci_dev *hdev, int state)
737{
738 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
739
740 if (hdev->discovery.state == state)
741 return;
742
743 switch (state) {
744 case DISCOVERY_STOPPED:
7b99b659
AG
745 if (hdev->discovery.state != DISCOVERY_STARTING)
746 mgmt_discovering(hdev, 0);
ff9ef578
JH
747 break;
748 case DISCOVERY_STARTING:
749 break;
343f935b 750 case DISCOVERY_FINDING:
ff9ef578
JH
751 mgmt_discovering(hdev, 1);
752 break;
30dc78e1
JH
753 case DISCOVERY_RESOLVING:
754 break;
ff9ef578
JH
755 case DISCOVERY_STOPPING:
756 break;
757 }
758
759 hdev->discovery.state = state;
760}
761
1f9b9a5d 762void hci_inquiry_cache_flush(struct hci_dev *hdev)
1da177e4 763{
30883512 764 struct discovery_state *cache = &hdev->discovery;
b57c1a56 765 struct inquiry_entry *p, *n;
1da177e4 766
561aafbc
JH
767 list_for_each_entry_safe(p, n, &cache->all, all) {
768 list_del(&p->all);
b57c1a56 769 kfree(p);
1da177e4 770 }
561aafbc
JH
771
772 INIT_LIST_HEAD(&cache->unknown);
773 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
774}
775
a8c5fb1a
GP
776struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
777 bdaddr_t *bdaddr)
1da177e4 778{
30883512 779 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
780 struct inquiry_entry *e;
781
6ed93dc6 782 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 783
561aafbc
JH
784 list_for_each_entry(e, &cache->all, all) {
785 if (!bacmp(&e->data.bdaddr, bdaddr))
786 return e;
787 }
788
789 return NULL;
790}
791
792struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 793 bdaddr_t *bdaddr)
561aafbc 794{
30883512 795 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
796 struct inquiry_entry *e;
797
6ed93dc6 798 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
799
800 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 801 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
802 return e;
803 }
804
805 return NULL;
1da177e4
LT
806}
807
30dc78e1 808struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
809 bdaddr_t *bdaddr,
810 int state)
30dc78e1
JH
811{
812 struct discovery_state *cache = &hdev->discovery;
813 struct inquiry_entry *e;
814
6ed93dc6 815 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
816
817 list_for_each_entry(e, &cache->resolve, list) {
818 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
819 return e;
820 if (!bacmp(&e->data.bdaddr, bdaddr))
821 return e;
822 }
823
824 return NULL;
825}
826
a3d4e20a 827void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
04124681 828 struct inquiry_entry *ie)
a3d4e20a
JH
829{
830 struct discovery_state *cache = &hdev->discovery;
831 struct list_head *pos = &cache->resolve;
832 struct inquiry_entry *p;
833
834 list_del(&ie->list);
835
836 list_for_each_entry(p, &cache->resolve, list) {
837 if (p->name_state != NAME_PENDING &&
a8c5fb1a 838 abs(p->data.rssi) >= abs(ie->data.rssi))
a3d4e20a
JH
839 break;
840 pos = &p->list;
841 }
842
843 list_add(&ie->list, pos);
844}
845
3175405b 846bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
04124681 847 bool name_known, bool *ssp)
1da177e4 848{
30883512 849 struct discovery_state *cache = &hdev->discovery;
70f23020 850 struct inquiry_entry *ie;
1da177e4 851
6ed93dc6 852 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1da177e4 853
2b2fec4d
SJ
854 hci_remove_remote_oob_data(hdev, &data->bdaddr);
855
388fc8fa
JH
856 if (ssp)
857 *ssp = data->ssp_mode;
858
70f23020 859 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a 860 if (ie) {
388fc8fa
JH
861 if (ie->data.ssp_mode && ssp)
862 *ssp = true;
863
a3d4e20a 864 if (ie->name_state == NAME_NEEDED &&
a8c5fb1a 865 data->rssi != ie->data.rssi) {
a3d4e20a
JH
866 ie->data.rssi = data->rssi;
867 hci_inquiry_cache_update_resolve(hdev, ie);
868 }
869
561aafbc 870 goto update;
a3d4e20a 871 }
561aafbc
JH
872
873 /* Entry not in the cache. Add new one. */
874 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
875 if (!ie)
3175405b 876 return false;
561aafbc
JH
877
878 list_add(&ie->all, &cache->all);
879
880 if (name_known) {
881 ie->name_state = NAME_KNOWN;
882 } else {
883 ie->name_state = NAME_NOT_KNOWN;
884 list_add(&ie->list, &cache->unknown);
885 }
70f23020 886
561aafbc
JH
887update:
888 if (name_known && ie->name_state != NAME_KNOWN &&
a8c5fb1a 889 ie->name_state != NAME_PENDING) {
561aafbc
JH
890 ie->name_state = NAME_KNOWN;
891 list_del(&ie->list);
1da177e4
LT
892 }
893
70f23020
AE
894 memcpy(&ie->data, data, sizeof(*data));
895 ie->timestamp = jiffies;
1da177e4 896 cache->timestamp = jiffies;
3175405b
JH
897
898 if (ie->name_state == NAME_NOT_KNOWN)
899 return false;
900
901 return true;
1da177e4
LT
902}
903
904static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
905{
30883512 906 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
907 struct inquiry_info *info = (struct inquiry_info *) buf;
908 struct inquiry_entry *e;
909 int copied = 0;
910
561aafbc 911 list_for_each_entry(e, &cache->all, all) {
1da177e4 912 struct inquiry_data *data = &e->data;
b57c1a56
JH
913
914 if (copied >= num)
915 break;
916
1da177e4
LT
917 bacpy(&info->bdaddr, &data->bdaddr);
918 info->pscan_rep_mode = data->pscan_rep_mode;
919 info->pscan_period_mode = data->pscan_period_mode;
920 info->pscan_mode = data->pscan_mode;
921 memcpy(info->dev_class, data->dev_class, 3);
922 info->clock_offset = data->clock_offset;
b57c1a56 923
1da177e4 924 info++;
b57c1a56 925 copied++;
1da177e4
LT
926 }
927
928 BT_DBG("cache %p, copied %d", cache, copied);
929 return copied;
930}
931
42c6b129 932static void hci_inq_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
933{
934 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
42c6b129 935 struct hci_dev *hdev = req->hdev;
1da177e4
LT
936 struct hci_cp_inquiry cp;
937
938 BT_DBG("%s", hdev->name);
939
940 if (test_bit(HCI_INQUIRY, &hdev->flags))
941 return;
942
943 /* Start Inquiry */
944 memcpy(&cp.lap, &ir->lap, 3);
945 cp.length = ir->length;
946 cp.num_rsp = ir->num_rsp;
42c6b129 947 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
948}
949
3e13fa1e
AG
950static int wait_inquiry(void *word)
951{
952 schedule();
953 return signal_pending(current);
954}
955
1da177e4
LT
956int hci_inquiry(void __user *arg)
957{
958 __u8 __user *ptr = arg;
959 struct hci_inquiry_req ir;
960 struct hci_dev *hdev;
961 int err = 0, do_inquiry = 0, max_rsp;
962 long timeo;
963 __u8 *buf;
964
965 if (copy_from_user(&ir, ptr, sizeof(ir)))
966 return -EFAULT;
967
5a08ecce
AE
968 hdev = hci_dev_get(ir.dev_id);
969 if (!hdev)
1da177e4
LT
970 return -ENODEV;
971
09fd0de5 972 hci_dev_lock(hdev);
8e87d142 973 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 974 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 975 hci_inquiry_cache_flush(hdev);
1da177e4
LT
976 do_inquiry = 1;
977 }
09fd0de5 978 hci_dev_unlock(hdev);
1da177e4 979
04837f64 980 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
981
982 if (do_inquiry) {
01178cd4
JH
983 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
984 timeo);
70f23020
AE
985 if (err < 0)
986 goto done;
3e13fa1e
AG
987
988 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
989 * cleared). If it is interrupted by a signal, return -EINTR.
990 */
991 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
992 TASK_INTERRUPTIBLE))
993 return -EINTR;
70f23020 994 }
1da177e4 995
8fc9ced3
GP
996 /* for unlimited number of responses we will use buffer with
997 * 255 entries
998 */
1da177e4
LT
999 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1000
1001 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1002 * copy it to the user space.
1003 */
01df8c31 1004 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 1005 if (!buf) {
1da177e4
LT
1006 err = -ENOMEM;
1007 goto done;
1008 }
1009
09fd0de5 1010 hci_dev_lock(hdev);
1da177e4 1011 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 1012 hci_dev_unlock(hdev);
1da177e4
LT
1013
1014 BT_DBG("num_rsp %d", ir.num_rsp);
1015
1016 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1017 ptr += sizeof(ir);
1018 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 1019 ir.num_rsp))
1da177e4 1020 err = -EFAULT;
8e87d142 1021 } else
1da177e4
LT
1022 err = -EFAULT;
1023
1024 kfree(buf);
1025
1026done:
1027 hci_dev_put(hdev);
1028 return err;
1029}
1030
3f0f524b
JH
1031static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
1032{
1033 u8 ad_len = 0, flags = 0;
1034 size_t name_len;
1035
1036 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
1037 flags |= LE_AD_GENERAL;
1038
1039 if (!lmp_bredr_capable(hdev))
1040 flags |= LE_AD_NO_BREDR;
1041
1042 if (lmp_le_br_capable(hdev))
1043 flags |= LE_AD_SIM_LE_BREDR_CTRL;
1044
1045 if (lmp_host_le_br_capable(hdev))
1046 flags |= LE_AD_SIM_LE_BREDR_HOST;
1047
1048 if (flags) {
1049 BT_DBG("adv flags 0x%02x", flags);
1050
1051 ptr[0] = 2;
1052 ptr[1] = EIR_FLAGS;
1053 ptr[2] = flags;
1054
1055 ad_len += 3;
1056 ptr += 3;
1057 }
1058
1059 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
1060 ptr[0] = 2;
1061 ptr[1] = EIR_TX_POWER;
1062 ptr[2] = (u8) hdev->adv_tx_power;
1063
1064 ad_len += 3;
1065 ptr += 3;
1066 }
1067
1068 name_len = strlen(hdev->dev_name);
1069 if (name_len > 0) {
1070 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
1071
1072 if (name_len > max_len) {
1073 name_len = max_len;
1074 ptr[1] = EIR_NAME_SHORT;
1075 } else
1076 ptr[1] = EIR_NAME_COMPLETE;
1077
1078 ptr[0] = name_len + 1;
1079
1080 memcpy(ptr + 2, hdev->dev_name, name_len);
1081
1082 ad_len += (name_len + 2);
1083 ptr += (name_len + 2);
1084 }
1085
1086 return ad_len;
1087}
1088
04b4edcb 1089void hci_update_ad(struct hci_request *req)
3f0f524b 1090{
04b4edcb 1091 struct hci_dev *hdev = req->hdev;
3f0f524b
JH
1092 struct hci_cp_le_set_adv_data cp;
1093 u8 len;
3f0f524b 1094
04b4edcb
JH
1095 if (!lmp_le_capable(hdev))
1096 return;
3f0f524b
JH
1097
1098 memset(&cp, 0, sizeof(cp));
1099
1100 len = create_ad(hdev, cp.data);
1101
1102 if (hdev->adv_data_len == len &&
04b4edcb
JH
1103 memcmp(cp.data, hdev->adv_data, len) == 0)
1104 return;
3f0f524b
JH
1105
1106 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1107 hdev->adv_data_len = len;
1108
1109 cp.length = len;
3f0f524b 1110
04b4edcb 1111 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
3f0f524b
JH
1112}
1113
1da177e4
LT
1114/* ---- HCI ioctl helpers ---- */
1115
1116int hci_dev_open(__u16 dev)
1117{
1118 struct hci_dev *hdev;
1119 int ret = 0;
1120
5a08ecce
AE
1121 hdev = hci_dev_get(dev);
1122 if (!hdev)
1da177e4
LT
1123 return -ENODEV;
1124
1125 BT_DBG("%s %p", hdev->name, hdev);
1126
1127 hci_req_lock(hdev);
1128
94324962
JH
1129 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1130 ret = -ENODEV;
1131 goto done;
1132 }
1133
611b30f7
MH
1134 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
1135 ret = -ERFKILL;
1136 goto done;
1137 }
1138
1da177e4
LT
1139 if (test_bit(HCI_UP, &hdev->flags)) {
1140 ret = -EALREADY;
1141 goto done;
1142 }
1143
1da177e4
LT
1144 if (hdev->open(hdev)) {
1145 ret = -EIO;
1146 goto done;
1147 }
1148
f41c70c4
MH
1149 atomic_set(&hdev->cmd_cnt, 1);
1150 set_bit(HCI_INIT, &hdev->flags);
1151
1152 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1153 ret = hdev->setup(hdev);
1154
1155 if (!ret) {
1156 /* Treat all non BR/EDR controllers as raw devices if
1157 * enable_hs is not set.
1158 */
1159 if (hdev->dev_type != HCI_BREDR && !enable_hs)
1160 set_bit(HCI_RAW, &hdev->flags);
1161
1162 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1163 set_bit(HCI_RAW, &hdev->flags);
1164
1165 if (!test_bit(HCI_RAW, &hdev->flags))
1166 ret = __hci_init(hdev);
1da177e4
LT
1167 }
1168
f41c70c4
MH
1169 clear_bit(HCI_INIT, &hdev->flags);
1170
1da177e4
LT
1171 if (!ret) {
1172 hci_dev_hold(hdev);
1173 set_bit(HCI_UP, &hdev->flags);
1174 hci_notify(hdev, HCI_DEV_UP);
bb4b2a9a
AE
1175 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1176 mgmt_valid_hdev(hdev)) {
09fd0de5 1177 hci_dev_lock(hdev);
744cf19e 1178 mgmt_powered(hdev, 1);
09fd0de5 1179 hci_dev_unlock(hdev);
56e5cb86 1180 }
8e87d142 1181 } else {
1da177e4 1182 /* Init failed, cleanup */
3eff45ea 1183 flush_work(&hdev->tx_work);
c347b765 1184 flush_work(&hdev->cmd_work);
b78752cc 1185 flush_work(&hdev->rx_work);
1da177e4
LT
1186
1187 skb_queue_purge(&hdev->cmd_q);
1188 skb_queue_purge(&hdev->rx_q);
1189
1190 if (hdev->flush)
1191 hdev->flush(hdev);
1192
1193 if (hdev->sent_cmd) {
1194 kfree_skb(hdev->sent_cmd);
1195 hdev->sent_cmd = NULL;
1196 }
1197
1198 hdev->close(hdev);
1199 hdev->flags = 0;
1200 }
1201
1202done:
1203 hci_req_unlock(hdev);
1204 hci_dev_put(hdev);
1205 return ret;
1206}
1207
1208static int hci_dev_do_close(struct hci_dev *hdev)
1209{
1210 BT_DBG("%s %p", hdev->name, hdev);
1211
78c04c0b
VCG
1212 cancel_delayed_work(&hdev->power_off);
1213
1da177e4
LT
1214 hci_req_cancel(hdev, ENODEV);
1215 hci_req_lock(hdev);
1216
1217 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
b79f44c1 1218 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
1219 hci_req_unlock(hdev);
1220 return 0;
1221 }
1222
3eff45ea
GP
1223 /* Flush RX and TX works */
1224 flush_work(&hdev->tx_work);
b78752cc 1225 flush_work(&hdev->rx_work);
1da177e4 1226
16ab91ab 1227 if (hdev->discov_timeout > 0) {
e0f9309f 1228 cancel_delayed_work(&hdev->discov_off);
16ab91ab 1229 hdev->discov_timeout = 0;
5e5282bb 1230 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
16ab91ab
JH
1231 }
1232
a8b2d5c2 1233 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
7d78525d
JH
1234 cancel_delayed_work(&hdev->service_cache);
1235
7ba8b4be
AG
1236 cancel_delayed_work_sync(&hdev->le_scan_disable);
1237
09fd0de5 1238 hci_dev_lock(hdev);
1f9b9a5d 1239 hci_inquiry_cache_flush(hdev);
1da177e4 1240 hci_conn_hash_flush(hdev);
09fd0de5 1241 hci_dev_unlock(hdev);
1da177e4
LT
1242
1243 hci_notify(hdev, HCI_DEV_DOWN);
1244
1245 if (hdev->flush)
1246 hdev->flush(hdev);
1247
1248 /* Reset device */
1249 skb_queue_purge(&hdev->cmd_q);
1250 atomic_set(&hdev->cmd_cnt, 1);
8af59467 1251 if (!test_bit(HCI_RAW, &hdev->flags) &&
a6c511c6 1252 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1da177e4 1253 set_bit(HCI_INIT, &hdev->flags);
01178cd4 1254 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1da177e4
LT
1255 clear_bit(HCI_INIT, &hdev->flags);
1256 }
1257
c347b765
GP
1258 /* flush cmd work */
1259 flush_work(&hdev->cmd_work);
1da177e4
LT
1260
1261 /* Drop queues */
1262 skb_queue_purge(&hdev->rx_q);
1263 skb_queue_purge(&hdev->cmd_q);
1264 skb_queue_purge(&hdev->raw_q);
1265
1266 /* Drop last sent command */
1267 if (hdev->sent_cmd) {
b79f44c1 1268 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
1269 kfree_skb(hdev->sent_cmd);
1270 hdev->sent_cmd = NULL;
1271 }
1272
b6ddb638
JH
1273 kfree_skb(hdev->recv_evt);
1274 hdev->recv_evt = NULL;
1275
1da177e4
LT
1276 /* After this point our queues are empty
1277 * and no tasks are scheduled. */
1278 hdev->close(hdev);
1279
35b973c9
JH
1280 /* Clear flags */
1281 hdev->flags = 0;
1282 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1283
bb4b2a9a
AE
1284 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1285 mgmt_valid_hdev(hdev)) {
8ee56540
MH
1286 hci_dev_lock(hdev);
1287 mgmt_powered(hdev, 0);
1288 hci_dev_unlock(hdev);
1289 }
5add6af8 1290
ced5c338
AE
1291 /* Controller radio is available but is currently powered down */
1292 hdev->amp_status = 0;
1293
e59fda8d 1294 memset(hdev->eir, 0, sizeof(hdev->eir));
09b3c3fb 1295 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
e59fda8d 1296
1da177e4
LT
1297 hci_req_unlock(hdev);
1298
1299 hci_dev_put(hdev);
1300 return 0;
1301}
1302
1303int hci_dev_close(__u16 dev)
1304{
1305 struct hci_dev *hdev;
1306 int err;
1307
70f23020
AE
1308 hdev = hci_dev_get(dev);
1309 if (!hdev)
1da177e4 1310 return -ENODEV;
8ee56540
MH
1311
1312 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1313 cancel_delayed_work(&hdev->power_off);
1314
1da177e4 1315 err = hci_dev_do_close(hdev);
8ee56540 1316
1da177e4
LT
1317 hci_dev_put(hdev);
1318 return err;
1319}
1320
1321int hci_dev_reset(__u16 dev)
1322{
1323 struct hci_dev *hdev;
1324 int ret = 0;
1325
70f23020
AE
1326 hdev = hci_dev_get(dev);
1327 if (!hdev)
1da177e4
LT
1328 return -ENODEV;
1329
1330 hci_req_lock(hdev);
1da177e4
LT
1331
1332 if (!test_bit(HCI_UP, &hdev->flags))
1333 goto done;
1334
1335 /* Drop queues */
1336 skb_queue_purge(&hdev->rx_q);
1337 skb_queue_purge(&hdev->cmd_q);
1338
09fd0de5 1339 hci_dev_lock(hdev);
1f9b9a5d 1340 hci_inquiry_cache_flush(hdev);
1da177e4 1341 hci_conn_hash_flush(hdev);
09fd0de5 1342 hci_dev_unlock(hdev);
1da177e4
LT
1343
1344 if (hdev->flush)
1345 hdev->flush(hdev);
1346
8e87d142 1347 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 1348 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4
LT
1349
1350 if (!test_bit(HCI_RAW, &hdev->flags))
01178cd4 1351 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1da177e4
LT
1352
1353done:
1da177e4
LT
1354 hci_req_unlock(hdev);
1355 hci_dev_put(hdev);
1356 return ret;
1357}
1358
1359int hci_dev_reset_stat(__u16 dev)
1360{
1361 struct hci_dev *hdev;
1362 int ret = 0;
1363
70f23020
AE
1364 hdev = hci_dev_get(dev);
1365 if (!hdev)
1da177e4
LT
1366 return -ENODEV;
1367
1368 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1369
1370 hci_dev_put(hdev);
1371
1372 return ret;
1373}
1374
1375int hci_dev_cmd(unsigned int cmd, void __user *arg)
1376{
1377 struct hci_dev *hdev;
1378 struct hci_dev_req dr;
1379 int err = 0;
1380
1381 if (copy_from_user(&dr, arg, sizeof(dr)))
1382 return -EFAULT;
1383
70f23020
AE
1384 hdev = hci_dev_get(dr.dev_id);
1385 if (!hdev)
1da177e4
LT
1386 return -ENODEV;
1387
1388 switch (cmd) {
1389 case HCISETAUTH:
01178cd4
JH
1390 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1391 HCI_INIT_TIMEOUT);
1da177e4
LT
1392 break;
1393
1394 case HCISETENCRYPT:
1395 if (!lmp_encrypt_capable(hdev)) {
1396 err = -EOPNOTSUPP;
1397 break;
1398 }
1399
1400 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1401 /* Auth must be enabled first */
01178cd4
JH
1402 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1403 HCI_INIT_TIMEOUT);
1da177e4
LT
1404 if (err)
1405 break;
1406 }
1407
01178cd4
JH
1408 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1409 HCI_INIT_TIMEOUT);
1da177e4
LT
1410 break;
1411
1412 case HCISETSCAN:
01178cd4
JH
1413 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1414 HCI_INIT_TIMEOUT);
1da177e4
LT
1415 break;
1416
1da177e4 1417 case HCISETLINKPOL:
01178cd4
JH
1418 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1419 HCI_INIT_TIMEOUT);
1da177e4
LT
1420 break;
1421
1422 case HCISETLINKMODE:
e4e8e37c
MH
1423 hdev->link_mode = ((__u16) dr.dev_opt) &
1424 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1425 break;
1426
1427 case HCISETPTYPE:
1428 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
1429 break;
1430
1431 case HCISETACLMTU:
e4e8e37c
MH
1432 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1433 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
1434 break;
1435
1436 case HCISETSCOMTU:
e4e8e37c
MH
1437 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1438 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
1439 break;
1440
1441 default:
1442 err = -EINVAL;
1443 break;
1444 }
e4e8e37c 1445
1da177e4
LT
1446 hci_dev_put(hdev);
1447 return err;
1448}
1449
1450int hci_get_dev_list(void __user *arg)
1451{
8035ded4 1452 struct hci_dev *hdev;
1da177e4
LT
1453 struct hci_dev_list_req *dl;
1454 struct hci_dev_req *dr;
1da177e4
LT
1455 int n = 0, size, err;
1456 __u16 dev_num;
1457
1458 if (get_user(dev_num, (__u16 __user *) arg))
1459 return -EFAULT;
1460
1461 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1462 return -EINVAL;
1463
1464 size = sizeof(*dl) + dev_num * sizeof(*dr);
1465
70f23020
AE
1466 dl = kzalloc(size, GFP_KERNEL);
1467 if (!dl)
1da177e4
LT
1468 return -ENOMEM;
1469
1470 dr = dl->dev_req;
1471
f20d09d5 1472 read_lock(&hci_dev_list_lock);
8035ded4 1473 list_for_each_entry(hdev, &hci_dev_list, list) {
a8b2d5c2 1474 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
e0f9309f 1475 cancel_delayed_work(&hdev->power_off);
c542a06c 1476
a8b2d5c2
JH
1477 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1478 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 1479
1da177e4
LT
1480 (dr + n)->dev_id = hdev->id;
1481 (dr + n)->dev_opt = hdev->flags;
c542a06c 1482
1da177e4
LT
1483 if (++n >= dev_num)
1484 break;
1485 }
f20d09d5 1486 read_unlock(&hci_dev_list_lock);
1da177e4
LT
1487
1488 dl->dev_num = n;
1489 size = sizeof(*dl) + n * sizeof(*dr);
1490
1491 err = copy_to_user(arg, dl, size);
1492 kfree(dl);
1493
1494 return err ? -EFAULT : 0;
1495}
1496
1497int hci_get_dev_info(void __user *arg)
1498{
1499 struct hci_dev *hdev;
1500 struct hci_dev_info di;
1501 int err = 0;
1502
1503 if (copy_from_user(&di, arg, sizeof(di)))
1504 return -EFAULT;
1505
70f23020
AE
1506 hdev = hci_dev_get(di.dev_id);
1507 if (!hdev)
1da177e4
LT
1508 return -ENODEV;
1509
a8b2d5c2 1510 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
3243553f 1511 cancel_delayed_work_sync(&hdev->power_off);
ab81cbf9 1512
a8b2d5c2
JH
1513 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1514 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 1515
1da177e4
LT
1516 strcpy(di.name, hdev->name);
1517 di.bdaddr = hdev->bdaddr;
943da25d 1518 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1da177e4
LT
1519 di.flags = hdev->flags;
1520 di.pkt_type = hdev->pkt_type;
572c7f84
JH
1521 if (lmp_bredr_capable(hdev)) {
1522 di.acl_mtu = hdev->acl_mtu;
1523 di.acl_pkts = hdev->acl_pkts;
1524 di.sco_mtu = hdev->sco_mtu;
1525 di.sco_pkts = hdev->sco_pkts;
1526 } else {
1527 di.acl_mtu = hdev->le_mtu;
1528 di.acl_pkts = hdev->le_pkts;
1529 di.sco_mtu = 0;
1530 di.sco_pkts = 0;
1531 }
1da177e4
LT
1532 di.link_policy = hdev->link_policy;
1533 di.link_mode = hdev->link_mode;
1534
1535 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1536 memcpy(&di.features, &hdev->features, sizeof(di.features));
1537
1538 if (copy_to_user(arg, &di, sizeof(di)))
1539 err = -EFAULT;
1540
1541 hci_dev_put(hdev);
1542
1543 return err;
1544}
1545
1546/* ---- Interface to HCI drivers ---- */
1547
611b30f7
MH
1548static int hci_rfkill_set_block(void *data, bool blocked)
1549{
1550 struct hci_dev *hdev = data;
1551
1552 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1553
1554 if (!blocked)
1555 return 0;
1556
1557 hci_dev_do_close(hdev);
1558
1559 return 0;
1560}
1561
1562static const struct rfkill_ops hci_rfkill_ops = {
1563 .set_block = hci_rfkill_set_block,
1564};
1565
ab81cbf9
JH
1566static void hci_power_on(struct work_struct *work)
1567{
1568 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
96570ffc 1569 int err;
ab81cbf9
JH
1570
1571 BT_DBG("%s", hdev->name);
1572
96570ffc
JH
1573 err = hci_dev_open(hdev->id);
1574 if (err < 0) {
1575 mgmt_set_powered_failed(hdev, err);
ab81cbf9 1576 return;
96570ffc 1577 }
ab81cbf9 1578
a8b2d5c2 1579 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
19202573
JH
1580 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1581 HCI_AUTO_OFF_TIMEOUT);
ab81cbf9 1582
a8b2d5c2 1583 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
744cf19e 1584 mgmt_index_added(hdev);
ab81cbf9
JH
1585}
1586
1587static void hci_power_off(struct work_struct *work)
1588{
3243553f 1589 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 1590 power_off.work);
ab81cbf9
JH
1591
1592 BT_DBG("%s", hdev->name);
1593
8ee56540 1594 hci_dev_do_close(hdev);
ab81cbf9
JH
1595}
1596
16ab91ab
JH
1597static void hci_discov_off(struct work_struct *work)
1598{
1599 struct hci_dev *hdev;
1600 u8 scan = SCAN_PAGE;
1601
1602 hdev = container_of(work, struct hci_dev, discov_off.work);
1603
1604 BT_DBG("%s", hdev->name);
1605
09fd0de5 1606 hci_dev_lock(hdev);
16ab91ab
JH
1607
1608 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1609
1610 hdev->discov_timeout = 0;
1611
09fd0de5 1612 hci_dev_unlock(hdev);
16ab91ab
JH
1613}
1614
2aeb9a1a
JH
1615int hci_uuids_clear(struct hci_dev *hdev)
1616{
4821002c 1617 struct bt_uuid *uuid, *tmp;
2aeb9a1a 1618
4821002c
JH
1619 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1620 list_del(&uuid->list);
2aeb9a1a
JH
1621 kfree(uuid);
1622 }
1623
1624 return 0;
1625}
1626
55ed8ca1
JH
1627int hci_link_keys_clear(struct hci_dev *hdev)
1628{
1629 struct list_head *p, *n;
1630
1631 list_for_each_safe(p, n, &hdev->link_keys) {
1632 struct link_key *key;
1633
1634 key = list_entry(p, struct link_key, list);
1635
1636 list_del(p);
1637 kfree(key);
1638 }
1639
1640 return 0;
1641}
1642
b899efaf
VCG
1643int hci_smp_ltks_clear(struct hci_dev *hdev)
1644{
1645 struct smp_ltk *k, *tmp;
1646
1647 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1648 list_del(&k->list);
1649 kfree(k);
1650 }
1651
1652 return 0;
1653}
1654
55ed8ca1
JH
1655struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1656{
8035ded4 1657 struct link_key *k;
55ed8ca1 1658
8035ded4 1659 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1660 if (bacmp(bdaddr, &k->bdaddr) == 0)
1661 return k;
55ed8ca1
JH
1662
1663 return NULL;
1664}
1665
745c0ce3 1666static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 1667 u8 key_type, u8 old_key_type)
d25e28ab
JH
1668{
1669 /* Legacy key */
1670 if (key_type < 0x03)
745c0ce3 1671 return true;
d25e28ab
JH
1672
1673 /* Debug keys are insecure so don't store them persistently */
1674 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 1675 return false;
d25e28ab
JH
1676
1677 /* Changed combination key and there's no previous one */
1678 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 1679 return false;
d25e28ab
JH
1680
1681 /* Security mode 3 case */
1682 if (!conn)
745c0ce3 1683 return true;
d25e28ab
JH
1684
1685 /* Neither local nor remote side had no-bonding as requirement */
1686 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 1687 return true;
d25e28ab
JH
1688
1689 /* Local side had dedicated bonding as requirement */
1690 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 1691 return true;
d25e28ab
JH
1692
1693 /* Remote side had dedicated bonding as requirement */
1694 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 1695 return true;
d25e28ab
JH
1696
1697 /* If none of the above criteria match, then don't store the key
1698 * persistently */
745c0ce3 1699 return false;
d25e28ab
JH
1700}
1701
c9839a11 1702struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
75d262c2 1703{
c9839a11 1704 struct smp_ltk *k;
75d262c2 1705
c9839a11
VCG
1706 list_for_each_entry(k, &hdev->long_term_keys, list) {
1707 if (k->ediv != ediv ||
a8c5fb1a 1708 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
1709 continue;
1710
c9839a11 1711 return k;
75d262c2
VCG
1712 }
1713
1714 return NULL;
1715}
75d262c2 1716
c9839a11 1717struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
04124681 1718 u8 addr_type)
75d262c2 1719{
c9839a11 1720 struct smp_ltk *k;
75d262c2 1721
c9839a11
VCG
1722 list_for_each_entry(k, &hdev->long_term_keys, list)
1723 if (addr_type == k->bdaddr_type &&
a8c5fb1a 1724 bacmp(bdaddr, &k->bdaddr) == 0)
75d262c2
VCG
1725 return k;
1726
1727 return NULL;
1728}
75d262c2 1729
d25e28ab 1730int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
04124681 1731 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
55ed8ca1
JH
1732{
1733 struct link_key *key, *old_key;
745c0ce3
VA
1734 u8 old_key_type;
1735 bool persistent;
55ed8ca1
JH
1736
1737 old_key = hci_find_link_key(hdev, bdaddr);
1738 if (old_key) {
1739 old_key_type = old_key->type;
1740 key = old_key;
1741 } else {
12adcf3a 1742 old_key_type = conn ? conn->key_type : 0xff;
55ed8ca1
JH
1743 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1744 if (!key)
1745 return -ENOMEM;
1746 list_add(&key->list, &hdev->link_keys);
1747 }
1748
6ed93dc6 1749 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 1750
d25e28ab
JH
1751 /* Some buggy controller combinations generate a changed
1752 * combination key for legacy pairing even when there's no
1753 * previous key */
1754 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 1755 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 1756 type = HCI_LK_COMBINATION;
655fe6ec
JH
1757 if (conn)
1758 conn->key_type = type;
1759 }
d25e28ab 1760
55ed8ca1 1761 bacpy(&key->bdaddr, bdaddr);
9b3b4460 1762 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
1763 key->pin_len = pin_len;
1764
b6020ba0 1765 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 1766 key->type = old_key_type;
4748fed2
JH
1767 else
1768 key->type = type;
1769
4df378a1
JH
1770 if (!new_key)
1771 return 0;
1772
1773 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1774
744cf19e 1775 mgmt_new_link_key(hdev, key, persistent);
4df378a1 1776
6ec5bcad
VA
1777 if (conn)
1778 conn->flush_key = !persistent;
55ed8ca1
JH
1779
1780 return 0;
1781}
1782
c9839a11 1783int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
9a006657 1784 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
04124681 1785 ediv, u8 rand[8])
75d262c2 1786{
c9839a11 1787 struct smp_ltk *key, *old_key;
75d262c2 1788
c9839a11
VCG
1789 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1790 return 0;
75d262c2 1791
c9839a11
VCG
1792 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1793 if (old_key)
75d262c2 1794 key = old_key;
c9839a11
VCG
1795 else {
1796 key = kzalloc(sizeof(*key), GFP_ATOMIC);
75d262c2
VCG
1797 if (!key)
1798 return -ENOMEM;
c9839a11 1799 list_add(&key->list, &hdev->long_term_keys);
75d262c2
VCG
1800 }
1801
75d262c2 1802 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
1803 key->bdaddr_type = addr_type;
1804 memcpy(key->val, tk, sizeof(key->val));
1805 key->authenticated = authenticated;
1806 key->ediv = ediv;
1807 key->enc_size = enc_size;
1808 key->type = type;
1809 memcpy(key->rand, rand, sizeof(key->rand));
75d262c2 1810
c9839a11
VCG
1811 if (!new_key)
1812 return 0;
75d262c2 1813
261cc5aa
VCG
1814 if (type & HCI_SMP_LTK)
1815 mgmt_new_ltk(hdev, key, 1);
1816
75d262c2
VCG
1817 return 0;
1818}
1819
55ed8ca1
JH
1820int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1821{
1822 struct link_key *key;
1823
1824 key = hci_find_link_key(hdev, bdaddr);
1825 if (!key)
1826 return -ENOENT;
1827
6ed93dc6 1828 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
1829
1830 list_del(&key->list);
1831 kfree(key);
1832
1833 return 0;
1834}
1835
b899efaf
VCG
1836int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1837{
1838 struct smp_ltk *k, *tmp;
1839
1840 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1841 if (bacmp(bdaddr, &k->bdaddr))
1842 continue;
1843
6ed93dc6 1844 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf
VCG
1845
1846 list_del(&k->list);
1847 kfree(k);
1848 }
1849
1850 return 0;
1851}
1852
6bd32326 1853/* HCI command timer function */
bda4f23a 1854static void hci_cmd_timeout(unsigned long arg)
6bd32326
VT
1855{
1856 struct hci_dev *hdev = (void *) arg;
1857
bda4f23a
AE
1858 if (hdev->sent_cmd) {
1859 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1860 u16 opcode = __le16_to_cpu(sent->opcode);
1861
1862 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1863 } else {
1864 BT_ERR("%s command tx timeout", hdev->name);
1865 }
1866
6bd32326 1867 atomic_set(&hdev->cmd_cnt, 1);
c347b765 1868 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
1869}
1870
2763eda6 1871struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 1872 bdaddr_t *bdaddr)
2763eda6
SJ
1873{
1874 struct oob_data *data;
1875
1876 list_for_each_entry(data, &hdev->remote_oob_data, list)
1877 if (bacmp(bdaddr, &data->bdaddr) == 0)
1878 return data;
1879
1880 return NULL;
1881}
1882
1883int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1884{
1885 struct oob_data *data;
1886
1887 data = hci_find_remote_oob_data(hdev, bdaddr);
1888 if (!data)
1889 return -ENOENT;
1890
6ed93dc6 1891 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
1892
1893 list_del(&data->list);
1894 kfree(data);
1895
1896 return 0;
1897}
1898
1899int hci_remote_oob_data_clear(struct hci_dev *hdev)
1900{
1901 struct oob_data *data, *n;
1902
1903 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1904 list_del(&data->list);
1905 kfree(data);
1906 }
1907
1908 return 0;
1909}
1910
1911int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
04124681 1912 u8 *randomizer)
2763eda6
SJ
1913{
1914 struct oob_data *data;
1915
1916 data = hci_find_remote_oob_data(hdev, bdaddr);
1917
1918 if (!data) {
1919 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1920 if (!data)
1921 return -ENOMEM;
1922
1923 bacpy(&data->bdaddr, bdaddr);
1924 list_add(&data->list, &hdev->remote_oob_data);
1925 }
1926
1927 memcpy(data->hash, hash, sizeof(data->hash));
1928 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1929
6ed93dc6 1930 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
1931
1932 return 0;
1933}
1934
04124681 1935struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
b2a66aad 1936{
8035ded4 1937 struct bdaddr_list *b;
b2a66aad 1938
8035ded4 1939 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1940 if (bacmp(bdaddr, &b->bdaddr) == 0)
1941 return b;
b2a66aad
AJ
1942
1943 return NULL;
1944}
1945
1946int hci_blacklist_clear(struct hci_dev *hdev)
1947{
1948 struct list_head *p, *n;
1949
1950 list_for_each_safe(p, n, &hdev->blacklist) {
1951 struct bdaddr_list *b;
1952
1953 b = list_entry(p, struct bdaddr_list, list);
1954
1955 list_del(p);
1956 kfree(b);
1957 }
1958
1959 return 0;
1960}
1961
88c1fe4b 1962int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1963{
1964 struct bdaddr_list *entry;
b2a66aad
AJ
1965
1966 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1967 return -EBADF;
1968
5e762444
AJ
1969 if (hci_blacklist_lookup(hdev, bdaddr))
1970 return -EEXIST;
b2a66aad
AJ
1971
1972 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
1973 if (!entry)
1974 return -ENOMEM;
b2a66aad
AJ
1975
1976 bacpy(&entry->bdaddr, bdaddr);
1977
1978 list_add(&entry->list, &hdev->blacklist);
1979
88c1fe4b 1980 return mgmt_device_blocked(hdev, bdaddr, type);
b2a66aad
AJ
1981}
1982
88c1fe4b 1983int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1984{
1985 struct bdaddr_list *entry;
b2a66aad 1986
1ec918ce 1987 if (bacmp(bdaddr, BDADDR_ANY) == 0)
5e762444 1988 return hci_blacklist_clear(hdev);
b2a66aad
AJ
1989
1990 entry = hci_blacklist_lookup(hdev, bdaddr);
1ec918ce 1991 if (!entry)
5e762444 1992 return -ENOENT;
b2a66aad
AJ
1993
1994 list_del(&entry->list);
1995 kfree(entry);
1996
88c1fe4b 1997 return mgmt_device_unblocked(hdev, bdaddr, type);
b2a66aad
AJ
1998}
1999
4c87eaab 2000static void inquiry_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 2001{
4c87eaab
AG
2002 if (status) {
2003 BT_ERR("Failed to start inquiry: status %d", status);
7ba8b4be 2004
4c87eaab
AG
2005 hci_dev_lock(hdev);
2006 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2007 hci_dev_unlock(hdev);
2008 return;
2009 }
7ba8b4be
AG
2010}
2011
4c87eaab 2012static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 2013{
4c87eaab
AG
2014 /* General inquiry access code (GIAC) */
2015 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2016 struct hci_request req;
2017 struct hci_cp_inquiry cp;
7ba8b4be
AG
2018 int err;
2019
4c87eaab
AG
2020 if (status) {
2021 BT_ERR("Failed to disable LE scanning: status %d", status);
2022 return;
2023 }
7ba8b4be 2024
4c87eaab
AG
2025 switch (hdev->discovery.type) {
2026 case DISCOV_TYPE_LE:
2027 hci_dev_lock(hdev);
2028 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2029 hci_dev_unlock(hdev);
2030 break;
7ba8b4be 2031
4c87eaab
AG
2032 case DISCOV_TYPE_INTERLEAVED:
2033 hci_req_init(&req, hdev);
7ba8b4be 2034
4c87eaab
AG
2035 memset(&cp, 0, sizeof(cp));
2036 memcpy(&cp.lap, lap, sizeof(cp.lap));
2037 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2038 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
7ba8b4be 2039
4c87eaab 2040 hci_dev_lock(hdev);
7dbfac1d 2041
4c87eaab 2042 hci_inquiry_cache_flush(hdev);
7dbfac1d 2043
4c87eaab
AG
2044 err = hci_req_run(&req, inquiry_complete);
2045 if (err) {
2046 BT_ERR("Inquiry request failed: err %d", err);
2047 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2048 }
7dbfac1d 2049
4c87eaab
AG
2050 hci_dev_unlock(hdev);
2051 break;
7dbfac1d 2052 }
7dbfac1d
AG
2053}
2054
7ba8b4be
AG
2055static void le_scan_disable_work(struct work_struct *work)
2056{
2057 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 2058 le_scan_disable.work);
7ba8b4be 2059 struct hci_cp_le_set_scan_enable cp;
4c87eaab
AG
2060 struct hci_request req;
2061 int err;
7ba8b4be
AG
2062
2063 BT_DBG("%s", hdev->name);
2064
4c87eaab 2065 hci_req_init(&req, hdev);
28b75a89 2066
7ba8b4be 2067 memset(&cp, 0, sizeof(cp));
4c87eaab
AG
2068 cp.enable = LE_SCAN_DISABLE;
2069 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
28b75a89 2070
4c87eaab
AG
2071 err = hci_req_run(&req, le_scan_disable_work_complete);
2072 if (err)
2073 BT_ERR("Disable LE scanning request failed: err %d", err);
28b75a89
AG
2074}
2075
9be0dab7
DH
2076/* Alloc HCI device */
2077struct hci_dev *hci_alloc_dev(void)
2078{
2079 struct hci_dev *hdev;
2080
2081 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2082 if (!hdev)
2083 return NULL;
2084
b1b813d4
DH
2085 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2086 hdev->esco_type = (ESCO_HV1);
2087 hdev->link_mode = (HCI_LM_ACCEPT);
2088 hdev->io_capability = 0x03; /* No Input No Output */
bbaf444a
JH
2089 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2090 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
b1b813d4 2091
b1b813d4
DH
2092 hdev->sniff_max_interval = 800;
2093 hdev->sniff_min_interval = 80;
2094
2095 mutex_init(&hdev->lock);
2096 mutex_init(&hdev->req_lock);
2097
2098 INIT_LIST_HEAD(&hdev->mgmt_pending);
2099 INIT_LIST_HEAD(&hdev->blacklist);
2100 INIT_LIST_HEAD(&hdev->uuids);
2101 INIT_LIST_HEAD(&hdev->link_keys);
2102 INIT_LIST_HEAD(&hdev->long_term_keys);
2103 INIT_LIST_HEAD(&hdev->remote_oob_data);
6b536b5e 2104 INIT_LIST_HEAD(&hdev->conn_hash.list);
b1b813d4
DH
2105
2106 INIT_WORK(&hdev->rx_work, hci_rx_work);
2107 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2108 INIT_WORK(&hdev->tx_work, hci_tx_work);
2109 INIT_WORK(&hdev->power_on, hci_power_on);
b1b813d4 2110
b1b813d4
DH
2111 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2112 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2113 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2114
b1b813d4
DH
2115 skb_queue_head_init(&hdev->rx_q);
2116 skb_queue_head_init(&hdev->cmd_q);
2117 skb_queue_head_init(&hdev->raw_q);
2118
2119 init_waitqueue_head(&hdev->req_wait_q);
2120
bda4f23a 2121 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
b1b813d4 2122
b1b813d4
DH
2123 hci_init_sysfs(hdev);
2124 discovery_init(hdev);
9be0dab7
DH
2125
2126 return hdev;
2127}
2128EXPORT_SYMBOL(hci_alloc_dev);
2129
2130/* Free HCI device */
2131void hci_free_dev(struct hci_dev *hdev)
2132{
9be0dab7
DH
2133 /* will free via device release */
2134 put_device(&hdev->dev);
2135}
2136EXPORT_SYMBOL(hci_free_dev);
2137
1da177e4
LT
2138/* Register HCI device */
2139int hci_register_dev(struct hci_dev *hdev)
2140{
b1b813d4 2141 int id, error;
1da177e4 2142
010666a1 2143 if (!hdev->open || !hdev->close)
1da177e4
LT
2144 return -EINVAL;
2145
08add513
MM
2146 /* Do not allow HCI_AMP devices to register at index 0,
2147 * so the index can be used as the AMP controller ID.
2148 */
3df92b31
SL
2149 switch (hdev->dev_type) {
2150 case HCI_BREDR:
2151 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2152 break;
2153 case HCI_AMP:
2154 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2155 break;
2156 default:
2157 return -EINVAL;
1da177e4 2158 }
8e87d142 2159
3df92b31
SL
2160 if (id < 0)
2161 return id;
2162
1da177e4
LT
2163 sprintf(hdev->name, "hci%d", id);
2164 hdev->id = id;
2d8b3a11
AE
2165
2166 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2167
3df92b31
SL
2168 write_lock(&hci_dev_list_lock);
2169 list_add(&hdev->list, &hci_dev_list);
f20d09d5 2170 write_unlock(&hci_dev_list_lock);
1da177e4 2171
d8537548
KC
2172 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2173 WQ_MEM_RECLAIM, 1, hdev->name);
33ca954d
DH
2174 if (!hdev->workqueue) {
2175 error = -ENOMEM;
2176 goto err;
2177 }
f48fd9c8 2178
d8537548
KC
2179 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2180 WQ_MEM_RECLAIM, 1, hdev->name);
6ead1bbc
JH
2181 if (!hdev->req_workqueue) {
2182 destroy_workqueue(hdev->workqueue);
2183 error = -ENOMEM;
2184 goto err;
2185 }
2186
33ca954d
DH
2187 error = hci_add_sysfs(hdev);
2188 if (error < 0)
2189 goto err_wqueue;
1da177e4 2190
611b30f7 2191 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
2192 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2193 hdev);
611b30f7
MH
2194 if (hdev->rfkill) {
2195 if (rfkill_register(hdev->rfkill) < 0) {
2196 rfkill_destroy(hdev->rfkill);
2197 hdev->rfkill = NULL;
2198 }
2199 }
2200
a8b2d5c2 2201 set_bit(HCI_SETUP, &hdev->dev_flags);
ce2be9ac
AE
2202
2203 if (hdev->dev_type != HCI_AMP)
2204 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2205
1da177e4 2206 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 2207 hci_dev_hold(hdev);
1da177e4 2208
19202573 2209 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 2210
1da177e4 2211 return id;
f48fd9c8 2212
33ca954d
DH
2213err_wqueue:
2214 destroy_workqueue(hdev->workqueue);
6ead1bbc 2215 destroy_workqueue(hdev->req_workqueue);
33ca954d 2216err:
3df92b31 2217 ida_simple_remove(&hci_index_ida, hdev->id);
f20d09d5 2218 write_lock(&hci_dev_list_lock);
f48fd9c8 2219 list_del(&hdev->list);
f20d09d5 2220 write_unlock(&hci_dev_list_lock);
f48fd9c8 2221
33ca954d 2222 return error;
1da177e4
LT
2223}
2224EXPORT_SYMBOL(hci_register_dev);
2225
2226/* Unregister HCI device */
59735631 2227void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 2228{
3df92b31 2229 int i, id;
ef222013 2230
c13854ce 2231 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 2232
94324962
JH
2233 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2234
3df92b31
SL
2235 id = hdev->id;
2236
f20d09d5 2237 write_lock(&hci_dev_list_lock);
1da177e4 2238 list_del(&hdev->list);
f20d09d5 2239 write_unlock(&hci_dev_list_lock);
1da177e4
LT
2240
2241 hci_dev_do_close(hdev);
2242
cd4c5391 2243 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
2244 kfree_skb(hdev->reassembly[i]);
2245
b9b5ef18
GP
2246 cancel_work_sync(&hdev->power_on);
2247
ab81cbf9 2248 if (!test_bit(HCI_INIT, &hdev->flags) &&
a8c5fb1a 2249 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
09fd0de5 2250 hci_dev_lock(hdev);
744cf19e 2251 mgmt_index_removed(hdev);
09fd0de5 2252 hci_dev_unlock(hdev);
56e5cb86 2253 }
ab81cbf9 2254
2e58ef3e
JH
2255 /* mgmt_index_removed should take care of emptying the
2256 * pending list */
2257 BUG_ON(!list_empty(&hdev->mgmt_pending));
2258
1da177e4
LT
2259 hci_notify(hdev, HCI_DEV_UNREG);
2260
611b30f7
MH
2261 if (hdev->rfkill) {
2262 rfkill_unregister(hdev->rfkill);
2263 rfkill_destroy(hdev->rfkill);
2264 }
2265
ce242970 2266 hci_del_sysfs(hdev);
147e2d59 2267
f48fd9c8 2268 destroy_workqueue(hdev->workqueue);
6ead1bbc 2269 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 2270
09fd0de5 2271 hci_dev_lock(hdev);
e2e0cacb 2272 hci_blacklist_clear(hdev);
2aeb9a1a 2273 hci_uuids_clear(hdev);
55ed8ca1 2274 hci_link_keys_clear(hdev);
b899efaf 2275 hci_smp_ltks_clear(hdev);
2763eda6 2276 hci_remote_oob_data_clear(hdev);
09fd0de5 2277 hci_dev_unlock(hdev);
e2e0cacb 2278
dc946bd8 2279 hci_dev_put(hdev);
3df92b31
SL
2280
2281 ida_simple_remove(&hci_index_ida, id);
1da177e4
LT
2282}
2283EXPORT_SYMBOL(hci_unregister_dev);
2284
2285/* Suspend HCI device */
2286int hci_suspend_dev(struct hci_dev *hdev)
2287{
2288 hci_notify(hdev, HCI_DEV_SUSPEND);
2289 return 0;
2290}
2291EXPORT_SYMBOL(hci_suspend_dev);
2292
2293/* Resume HCI device */
2294int hci_resume_dev(struct hci_dev *hdev)
2295{
2296 hci_notify(hdev, HCI_DEV_RESUME);
2297 return 0;
2298}
2299EXPORT_SYMBOL(hci_resume_dev);
2300
76bca880
MH
2301/* Receive frame from HCI drivers */
2302int hci_recv_frame(struct sk_buff *skb)
2303{
2304 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2305 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 2306 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
2307 kfree_skb(skb);
2308 return -ENXIO;
2309 }
2310
d82603c6 2311 /* Incoming skb */
76bca880
MH
2312 bt_cb(skb)->incoming = 1;
2313
2314 /* Time stamp */
2315 __net_timestamp(skb);
2316
76bca880 2317 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 2318 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 2319
76bca880
MH
2320 return 0;
2321}
2322EXPORT_SYMBOL(hci_recv_frame);
2323
33e882a5 2324static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 2325 int count, __u8 index)
33e882a5
SS
2326{
2327 int len = 0;
2328 int hlen = 0;
2329 int remain = count;
2330 struct sk_buff *skb;
2331 struct bt_skb_cb *scb;
2332
2333 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 2334 index >= NUM_REASSEMBLY)
33e882a5
SS
2335 return -EILSEQ;
2336
2337 skb = hdev->reassembly[index];
2338
2339 if (!skb) {
2340 switch (type) {
2341 case HCI_ACLDATA_PKT:
2342 len = HCI_MAX_FRAME_SIZE;
2343 hlen = HCI_ACL_HDR_SIZE;
2344 break;
2345 case HCI_EVENT_PKT:
2346 len = HCI_MAX_EVENT_SIZE;
2347 hlen = HCI_EVENT_HDR_SIZE;
2348 break;
2349 case HCI_SCODATA_PKT:
2350 len = HCI_MAX_SCO_SIZE;
2351 hlen = HCI_SCO_HDR_SIZE;
2352 break;
2353 }
2354
1e429f38 2355 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
2356 if (!skb)
2357 return -ENOMEM;
2358
2359 scb = (void *) skb->cb;
2360 scb->expect = hlen;
2361 scb->pkt_type = type;
2362
2363 skb->dev = (void *) hdev;
2364 hdev->reassembly[index] = skb;
2365 }
2366
2367 while (count) {
2368 scb = (void *) skb->cb;
89bb46d0 2369 len = min_t(uint, scb->expect, count);
33e882a5
SS
2370
2371 memcpy(skb_put(skb, len), data, len);
2372
2373 count -= len;
2374 data += len;
2375 scb->expect -= len;
2376 remain = count;
2377
2378 switch (type) {
2379 case HCI_EVENT_PKT:
2380 if (skb->len == HCI_EVENT_HDR_SIZE) {
2381 struct hci_event_hdr *h = hci_event_hdr(skb);
2382 scb->expect = h->plen;
2383
2384 if (skb_tailroom(skb) < scb->expect) {
2385 kfree_skb(skb);
2386 hdev->reassembly[index] = NULL;
2387 return -ENOMEM;
2388 }
2389 }
2390 break;
2391
2392 case HCI_ACLDATA_PKT:
2393 if (skb->len == HCI_ACL_HDR_SIZE) {
2394 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2395 scb->expect = __le16_to_cpu(h->dlen);
2396
2397 if (skb_tailroom(skb) < scb->expect) {
2398 kfree_skb(skb);
2399 hdev->reassembly[index] = NULL;
2400 return -ENOMEM;
2401 }
2402 }
2403 break;
2404
2405 case HCI_SCODATA_PKT:
2406 if (skb->len == HCI_SCO_HDR_SIZE) {
2407 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2408 scb->expect = h->dlen;
2409
2410 if (skb_tailroom(skb) < scb->expect) {
2411 kfree_skb(skb);
2412 hdev->reassembly[index] = NULL;
2413 return -ENOMEM;
2414 }
2415 }
2416 break;
2417 }
2418
2419 if (scb->expect == 0) {
2420 /* Complete frame */
2421
2422 bt_cb(skb)->pkt_type = type;
2423 hci_recv_frame(skb);
2424
2425 hdev->reassembly[index] = NULL;
2426 return remain;
2427 }
2428 }
2429
2430 return remain;
2431}
2432
ef222013
MH
2433int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2434{
f39a3c06
SS
2435 int rem = 0;
2436
ef222013
MH
2437 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2438 return -EILSEQ;
2439
da5f6c37 2440 while (count) {
1e429f38 2441 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
2442 if (rem < 0)
2443 return rem;
ef222013 2444
f39a3c06
SS
2445 data += (count - rem);
2446 count = rem;
f81c6224 2447 }
ef222013 2448
f39a3c06 2449 return rem;
ef222013
MH
2450}
2451EXPORT_SYMBOL(hci_recv_fragment);
2452
99811510
SS
2453#define STREAM_REASSEMBLY 0
2454
2455int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2456{
2457 int type;
2458 int rem = 0;
2459
da5f6c37 2460 while (count) {
99811510
SS
2461 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2462
2463 if (!skb) {
2464 struct { char type; } *pkt;
2465
2466 /* Start of the frame */
2467 pkt = data;
2468 type = pkt->type;
2469
2470 data++;
2471 count--;
2472 } else
2473 type = bt_cb(skb)->pkt_type;
2474
1e429f38 2475 rem = hci_reassembly(hdev, type, data, count,
a8c5fb1a 2476 STREAM_REASSEMBLY);
99811510
SS
2477 if (rem < 0)
2478 return rem;
2479
2480 data += (count - rem);
2481 count = rem;
f81c6224 2482 }
99811510
SS
2483
2484 return rem;
2485}
2486EXPORT_SYMBOL(hci_recv_stream_fragment);
2487
1da177e4
LT
2488/* ---- Interface to upper protocols ---- */
2489
1da177e4
LT
2490int hci_register_cb(struct hci_cb *cb)
2491{
2492 BT_DBG("%p name %s", cb, cb->name);
2493
f20d09d5 2494 write_lock(&hci_cb_list_lock);
1da177e4 2495 list_add(&cb->list, &hci_cb_list);
f20d09d5 2496 write_unlock(&hci_cb_list_lock);
1da177e4
LT
2497
2498 return 0;
2499}
2500EXPORT_SYMBOL(hci_register_cb);
2501
2502int hci_unregister_cb(struct hci_cb *cb)
2503{
2504 BT_DBG("%p name %s", cb, cb->name);
2505
f20d09d5 2506 write_lock(&hci_cb_list_lock);
1da177e4 2507 list_del(&cb->list);
f20d09d5 2508 write_unlock(&hci_cb_list_lock);
1da177e4
LT
2509
2510 return 0;
2511}
2512EXPORT_SYMBOL(hci_unregister_cb);
2513
2514static int hci_send_frame(struct sk_buff *skb)
2515{
2516 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2517
2518 if (!hdev) {
2519 kfree_skb(skb);
2520 return -ENODEV;
2521 }
2522
0d48d939 2523 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 2524
cd82e61c
MH
2525 /* Time stamp */
2526 __net_timestamp(skb);
1da177e4 2527
cd82e61c
MH
2528 /* Send copy to monitor */
2529 hci_send_to_monitor(hdev, skb);
2530
2531 if (atomic_read(&hdev->promisc)) {
2532 /* Send copy to the sockets */
470fe1b5 2533 hci_send_to_sock(hdev, skb);
1da177e4
LT
2534 }
2535
2536 /* Get rid of skb owner, prior to sending to the driver. */
2537 skb_orphan(skb);
2538
2539 return hdev->send(skb);
2540}
2541
3119ae95
JH
2542void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2543{
2544 skb_queue_head_init(&req->cmd_q);
2545 req->hdev = hdev;
5d73e034 2546 req->err = 0;
3119ae95
JH
2547}
2548
2549int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2550{
2551 struct hci_dev *hdev = req->hdev;
2552 struct sk_buff *skb;
2553 unsigned long flags;
2554
2555 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2556
5d73e034
AG
2557 /* If an error occured during request building, remove all HCI
2558 * commands queued on the HCI request queue.
2559 */
2560 if (req->err) {
2561 skb_queue_purge(&req->cmd_q);
2562 return req->err;
2563 }
2564
3119ae95
JH
2565 /* Do not allow empty requests */
2566 if (skb_queue_empty(&req->cmd_q))
382b0c39 2567 return -ENODATA;
3119ae95
JH
2568
2569 skb = skb_peek_tail(&req->cmd_q);
2570 bt_cb(skb)->req.complete = complete;
2571
2572 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2573 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2574 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2575
2576 queue_work(hdev->workqueue, &hdev->cmd_work);
2577
2578 return 0;
2579}
2580
1ca3a9d0 2581static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
07dc93dd 2582 u32 plen, const void *param)
1da177e4
LT
2583{
2584 int len = HCI_COMMAND_HDR_SIZE + plen;
2585 struct hci_command_hdr *hdr;
2586 struct sk_buff *skb;
2587
1da177e4 2588 skb = bt_skb_alloc(len, GFP_ATOMIC);
1ca3a9d0
JH
2589 if (!skb)
2590 return NULL;
1da177e4
LT
2591
2592 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 2593 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
2594 hdr->plen = plen;
2595
2596 if (plen)
2597 memcpy(skb_put(skb, plen), param, plen);
2598
2599 BT_DBG("skb len %d", skb->len);
2600
0d48d939 2601 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1da177e4 2602 skb->dev = (void *) hdev;
c78ae283 2603
1ca3a9d0
JH
2604 return skb;
2605}
2606
2607/* Send HCI command */
07dc93dd
JH
2608int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2609 const void *param)
1ca3a9d0
JH
2610{
2611 struct sk_buff *skb;
2612
2613 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2614
2615 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2616 if (!skb) {
2617 BT_ERR("%s no memory for command", hdev->name);
2618 return -ENOMEM;
2619 }
2620
11714b3d
JH
2621 /* Stand-alone HCI commands must be flaged as
2622 * single-command requests.
2623 */
2624 bt_cb(skb)->req.start = true;
2625
1da177e4 2626 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 2627 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
2628
2629 return 0;
2630}
1da177e4 2631
71c76a17 2632/* Queue a command to an asynchronous HCI request */
07dc93dd
JH
2633void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2634 const void *param, u8 event)
71c76a17
JH
2635{
2636 struct hci_dev *hdev = req->hdev;
2637 struct sk_buff *skb;
2638
2639 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2640
34739c1e
AG
2641 /* If an error occured during request building, there is no point in
2642 * queueing the HCI command. We can simply return.
2643 */
2644 if (req->err)
2645 return;
2646
71c76a17
JH
2647 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2648 if (!skb) {
5d73e034
AG
2649 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2650 hdev->name, opcode);
2651 req->err = -ENOMEM;
e348fe6b 2652 return;
71c76a17
JH
2653 }
2654
2655 if (skb_queue_empty(&req->cmd_q))
2656 bt_cb(skb)->req.start = true;
2657
02350a72
JH
2658 bt_cb(skb)->req.event = event;
2659
71c76a17 2660 skb_queue_tail(&req->cmd_q, skb);
71c76a17
JH
2661}
2662
07dc93dd
JH
2663void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2664 const void *param)
02350a72
JH
2665{
2666 hci_req_add_ev(req, opcode, plen, param, 0);
2667}
2668
1da177e4 2669/* Get data from the previously sent command */
a9de9248 2670void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
2671{
2672 struct hci_command_hdr *hdr;
2673
2674 if (!hdev->sent_cmd)
2675 return NULL;
2676
2677 hdr = (void *) hdev->sent_cmd->data;
2678
a9de9248 2679 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
2680 return NULL;
2681
f0e09510 2682 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
2683
2684 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2685}
2686
2687/* Send ACL data */
2688static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2689{
2690 struct hci_acl_hdr *hdr;
2691 int len = skb->len;
2692
badff6d0
ACM
2693 skb_push(skb, HCI_ACL_HDR_SIZE);
2694 skb_reset_transport_header(skb);
9c70220b 2695 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
2696 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2697 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
2698}
2699
ee22be7e 2700static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 2701 struct sk_buff *skb, __u16 flags)
1da177e4 2702{
ee22be7e 2703 struct hci_conn *conn = chan->conn;
1da177e4
LT
2704 struct hci_dev *hdev = conn->hdev;
2705 struct sk_buff *list;
2706
087bfd99
GP
2707 skb->len = skb_headlen(skb);
2708 skb->data_len = 0;
2709
2710 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
2711
2712 switch (hdev->dev_type) {
2713 case HCI_BREDR:
2714 hci_add_acl_hdr(skb, conn->handle, flags);
2715 break;
2716 case HCI_AMP:
2717 hci_add_acl_hdr(skb, chan->handle, flags);
2718 break;
2719 default:
2720 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2721 return;
2722 }
087bfd99 2723
70f23020
AE
2724 list = skb_shinfo(skb)->frag_list;
2725 if (!list) {
1da177e4
LT
2726 /* Non fragmented */
2727 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2728
73d80deb 2729 skb_queue_tail(queue, skb);
1da177e4
LT
2730 } else {
2731 /* Fragmented */
2732 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2733
2734 skb_shinfo(skb)->frag_list = NULL;
2735
2736 /* Queue all fragments atomically */
af3e6359 2737 spin_lock(&queue->lock);
1da177e4 2738
73d80deb 2739 __skb_queue_tail(queue, skb);
e702112f
AE
2740
2741 flags &= ~ACL_START;
2742 flags |= ACL_CONT;
1da177e4
LT
2743 do {
2744 skb = list; list = list->next;
8e87d142 2745
1da177e4 2746 skb->dev = (void *) hdev;
0d48d939 2747 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 2748 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
2749
2750 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2751
73d80deb 2752 __skb_queue_tail(queue, skb);
1da177e4
LT
2753 } while (list);
2754
af3e6359 2755 spin_unlock(&queue->lock);
1da177e4 2756 }
73d80deb
LAD
2757}
2758
2759void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2760{
ee22be7e 2761 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 2762
f0e09510 2763 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb
LAD
2764
2765 skb->dev = (void *) hdev;
73d80deb 2766
ee22be7e 2767 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 2768
3eff45ea 2769 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 2770}
1da177e4
LT
2771
2772/* Send SCO data */
0d861d8b 2773void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
2774{
2775 struct hci_dev *hdev = conn->hdev;
2776 struct hci_sco_hdr hdr;
2777
2778 BT_DBG("%s len %d", hdev->name, skb->len);
2779
aca3192c 2780 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
2781 hdr.dlen = skb->len;
2782
badff6d0
ACM
2783 skb_push(skb, HCI_SCO_HDR_SIZE);
2784 skb_reset_transport_header(skb);
9c70220b 2785 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4
LT
2786
2787 skb->dev = (void *) hdev;
0d48d939 2788 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 2789
1da177e4 2790 skb_queue_tail(&conn->data_q, skb);
3eff45ea 2791 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 2792}
1da177e4
LT
2793
2794/* ---- HCI TX task (outgoing data) ---- */
2795
2796/* HCI Connection scheduler */
6039aa73
GP
2797static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2798 int *quote)
1da177e4
LT
2799{
2800 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2801 struct hci_conn *conn = NULL, *c;
abc5de8f 2802 unsigned int num = 0, min = ~0;
1da177e4 2803
8e87d142 2804 /* We don't have to lock device here. Connections are always
1da177e4 2805 * added and removed with TX task disabled. */
bf4c6325
GP
2806
2807 rcu_read_lock();
2808
2809 list_for_each_entry_rcu(c, &h->list, list) {
769be974 2810 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 2811 continue;
769be974
MH
2812
2813 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2814 continue;
2815
1da177e4
LT
2816 num++;
2817
2818 if (c->sent < min) {
2819 min = c->sent;
2820 conn = c;
2821 }
52087a79
LAD
2822
2823 if (hci_conn_num(hdev, type) == num)
2824 break;
1da177e4
LT
2825 }
2826
bf4c6325
GP
2827 rcu_read_unlock();
2828
1da177e4 2829 if (conn) {
6ed58ec5
VT
2830 int cnt, q;
2831
2832 switch (conn->type) {
2833 case ACL_LINK:
2834 cnt = hdev->acl_cnt;
2835 break;
2836 case SCO_LINK:
2837 case ESCO_LINK:
2838 cnt = hdev->sco_cnt;
2839 break;
2840 case LE_LINK:
2841 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2842 break;
2843 default:
2844 cnt = 0;
2845 BT_ERR("Unknown link type");
2846 }
2847
2848 q = cnt / num;
1da177e4
LT
2849 *quote = q ? q : 1;
2850 } else
2851 *quote = 0;
2852
2853 BT_DBG("conn %p quote %d", conn, *quote);
2854 return conn;
2855}
2856
6039aa73 2857static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
2858{
2859 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2860 struct hci_conn *c;
1da177e4 2861
bae1f5d9 2862 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 2863
bf4c6325
GP
2864 rcu_read_lock();
2865
1da177e4 2866 /* Kill stalled connections */
bf4c6325 2867 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 2868 if (c->type == type && c->sent) {
6ed93dc6
AE
2869 BT_ERR("%s killing stalled connection %pMR",
2870 hdev->name, &c->dst);
bed71748 2871 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
2872 }
2873 }
bf4c6325
GP
2874
2875 rcu_read_unlock();
1da177e4
LT
2876}
2877
6039aa73
GP
2878static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2879 int *quote)
1da177e4 2880{
73d80deb
LAD
2881 struct hci_conn_hash *h = &hdev->conn_hash;
2882 struct hci_chan *chan = NULL;
abc5de8f 2883 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 2884 struct hci_conn *conn;
73d80deb
LAD
2885 int cnt, q, conn_num = 0;
2886
2887 BT_DBG("%s", hdev->name);
2888
bf4c6325
GP
2889 rcu_read_lock();
2890
2891 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
2892 struct hci_chan *tmp;
2893
2894 if (conn->type != type)
2895 continue;
2896
2897 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2898 continue;
2899
2900 conn_num++;
2901
8192edef 2902 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
2903 struct sk_buff *skb;
2904
2905 if (skb_queue_empty(&tmp->data_q))
2906 continue;
2907
2908 skb = skb_peek(&tmp->data_q);
2909 if (skb->priority < cur_prio)
2910 continue;
2911
2912 if (skb->priority > cur_prio) {
2913 num = 0;
2914 min = ~0;
2915 cur_prio = skb->priority;
2916 }
2917
2918 num++;
2919
2920 if (conn->sent < min) {
2921 min = conn->sent;
2922 chan = tmp;
2923 }
2924 }
2925
2926 if (hci_conn_num(hdev, type) == conn_num)
2927 break;
2928 }
2929
bf4c6325
GP
2930 rcu_read_unlock();
2931
73d80deb
LAD
2932 if (!chan)
2933 return NULL;
2934
2935 switch (chan->conn->type) {
2936 case ACL_LINK:
2937 cnt = hdev->acl_cnt;
2938 break;
bd1eb66b
AE
2939 case AMP_LINK:
2940 cnt = hdev->block_cnt;
2941 break;
73d80deb
LAD
2942 case SCO_LINK:
2943 case ESCO_LINK:
2944 cnt = hdev->sco_cnt;
2945 break;
2946 case LE_LINK:
2947 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2948 break;
2949 default:
2950 cnt = 0;
2951 BT_ERR("Unknown link type");
2952 }
2953
2954 q = cnt / num;
2955 *quote = q ? q : 1;
2956 BT_DBG("chan %p quote %d", chan, *quote);
2957 return chan;
2958}
2959
02b20f0b
LAD
2960static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2961{
2962 struct hci_conn_hash *h = &hdev->conn_hash;
2963 struct hci_conn *conn;
2964 int num = 0;
2965
2966 BT_DBG("%s", hdev->name);
2967
bf4c6325
GP
2968 rcu_read_lock();
2969
2970 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
2971 struct hci_chan *chan;
2972
2973 if (conn->type != type)
2974 continue;
2975
2976 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2977 continue;
2978
2979 num++;
2980
8192edef 2981 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
2982 struct sk_buff *skb;
2983
2984 if (chan->sent) {
2985 chan->sent = 0;
2986 continue;
2987 }
2988
2989 if (skb_queue_empty(&chan->data_q))
2990 continue;
2991
2992 skb = skb_peek(&chan->data_q);
2993 if (skb->priority >= HCI_PRIO_MAX - 1)
2994 continue;
2995
2996 skb->priority = HCI_PRIO_MAX - 1;
2997
2998 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 2999 skb->priority);
02b20f0b
LAD
3000 }
3001
3002 if (hci_conn_num(hdev, type) == num)
3003 break;
3004 }
bf4c6325
GP
3005
3006 rcu_read_unlock();
3007
02b20f0b
LAD
3008}
3009
b71d385a
AE
3010static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3011{
3012 /* Calculate count of blocks used by this packet */
3013 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3014}
3015
6039aa73 3016static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 3017{
1da177e4
LT
3018 if (!test_bit(HCI_RAW, &hdev->flags)) {
3019 /* ACL tx timeout must be longer than maximum
3020 * link supervision timeout (40.9 seconds) */
63d2bc1b 3021 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 3022 HCI_ACL_TX_TIMEOUT))
bae1f5d9 3023 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 3024 }
63d2bc1b 3025}
1da177e4 3026
6039aa73 3027static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
3028{
3029 unsigned int cnt = hdev->acl_cnt;
3030 struct hci_chan *chan;
3031 struct sk_buff *skb;
3032 int quote;
3033
3034 __check_timeout(hdev, cnt);
04837f64 3035
73d80deb 3036 while (hdev->acl_cnt &&
a8c5fb1a 3037 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
3038 u32 priority = (skb_peek(&chan->data_q))->priority;
3039 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 3040 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3041 skb->len, skb->priority);
73d80deb 3042
ec1cce24
LAD
3043 /* Stop if priority has changed */
3044 if (skb->priority < priority)
3045 break;
3046
3047 skb = skb_dequeue(&chan->data_q);
3048
73d80deb 3049 hci_conn_enter_active_mode(chan->conn,
04124681 3050 bt_cb(skb)->force_active);
04837f64 3051
1da177e4
LT
3052 hci_send_frame(skb);
3053 hdev->acl_last_tx = jiffies;
3054
3055 hdev->acl_cnt--;
73d80deb
LAD
3056 chan->sent++;
3057 chan->conn->sent++;
1da177e4
LT
3058 }
3059 }
02b20f0b
LAD
3060
3061 if (cnt != hdev->acl_cnt)
3062 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
3063}
3064
6039aa73 3065static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 3066{
63d2bc1b 3067 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
3068 struct hci_chan *chan;
3069 struct sk_buff *skb;
3070 int quote;
bd1eb66b 3071 u8 type;
b71d385a 3072
63d2bc1b 3073 __check_timeout(hdev, cnt);
b71d385a 3074
bd1eb66b
AE
3075 BT_DBG("%s", hdev->name);
3076
3077 if (hdev->dev_type == HCI_AMP)
3078 type = AMP_LINK;
3079 else
3080 type = ACL_LINK;
3081
b71d385a 3082 while (hdev->block_cnt > 0 &&
bd1eb66b 3083 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
3084 u32 priority = (skb_peek(&chan->data_q))->priority;
3085 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3086 int blocks;
3087
3088 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3089 skb->len, skb->priority);
b71d385a
AE
3090
3091 /* Stop if priority has changed */
3092 if (skb->priority < priority)
3093 break;
3094
3095 skb = skb_dequeue(&chan->data_q);
3096
3097 blocks = __get_blocks(hdev, skb);
3098 if (blocks > hdev->block_cnt)
3099 return;
3100
3101 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 3102 bt_cb(skb)->force_active);
b71d385a
AE
3103
3104 hci_send_frame(skb);
3105 hdev->acl_last_tx = jiffies;
3106
3107 hdev->block_cnt -= blocks;
3108 quote -= blocks;
3109
3110 chan->sent += blocks;
3111 chan->conn->sent += blocks;
3112 }
3113 }
3114
3115 if (cnt != hdev->block_cnt)
bd1eb66b 3116 hci_prio_recalculate(hdev, type);
b71d385a
AE
3117}
3118
6039aa73 3119static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
3120{
3121 BT_DBG("%s", hdev->name);
3122
bd1eb66b
AE
3123 /* No ACL link over BR/EDR controller */
3124 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3125 return;
3126
3127 /* No AMP link over AMP controller */
3128 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
3129 return;
3130
3131 switch (hdev->flow_ctl_mode) {
3132 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3133 hci_sched_acl_pkt(hdev);
3134 break;
3135
3136 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3137 hci_sched_acl_blk(hdev);
3138 break;
3139 }
3140}
3141
1da177e4 3142/* Schedule SCO */
6039aa73 3143static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
3144{
3145 struct hci_conn *conn;
3146 struct sk_buff *skb;
3147 int quote;
3148
3149 BT_DBG("%s", hdev->name);
3150
52087a79
LAD
3151 if (!hci_conn_num(hdev, SCO_LINK))
3152 return;
3153
1da177e4
LT
3154 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3155 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3156 BT_DBG("skb %p len %d", skb, skb->len);
3157 hci_send_frame(skb);
3158
3159 conn->sent++;
3160 if (conn->sent == ~0)
3161 conn->sent = 0;
3162 }
3163 }
3164}
3165
6039aa73 3166static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
3167{
3168 struct hci_conn *conn;
3169 struct sk_buff *skb;
3170 int quote;
3171
3172 BT_DBG("%s", hdev->name);
3173
52087a79
LAD
3174 if (!hci_conn_num(hdev, ESCO_LINK))
3175 return;
3176
8fc9ced3
GP
3177 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3178 &quote))) {
b6a0dc82
MH
3179 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3180 BT_DBG("skb %p len %d", skb, skb->len);
3181 hci_send_frame(skb);
3182
3183 conn->sent++;
3184 if (conn->sent == ~0)
3185 conn->sent = 0;
3186 }
3187 }
3188}
3189
6039aa73 3190static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 3191{
73d80deb 3192 struct hci_chan *chan;
6ed58ec5 3193 struct sk_buff *skb;
02b20f0b 3194 int quote, cnt, tmp;
6ed58ec5
VT
3195
3196 BT_DBG("%s", hdev->name);
3197
52087a79
LAD
3198 if (!hci_conn_num(hdev, LE_LINK))
3199 return;
3200
6ed58ec5
VT
3201 if (!test_bit(HCI_RAW, &hdev->flags)) {
3202 /* LE tx timeout must be longer than maximum
3203 * link supervision timeout (40.9 seconds) */
bae1f5d9 3204 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 3205 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 3206 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
3207 }
3208
3209 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 3210 tmp = cnt;
73d80deb 3211 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
3212 u32 priority = (skb_peek(&chan->data_q))->priority;
3213 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 3214 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3215 skb->len, skb->priority);
6ed58ec5 3216
ec1cce24
LAD
3217 /* Stop if priority has changed */
3218 if (skb->priority < priority)
3219 break;
3220
3221 skb = skb_dequeue(&chan->data_q);
3222
6ed58ec5
VT
3223 hci_send_frame(skb);
3224 hdev->le_last_tx = jiffies;
3225
3226 cnt--;
73d80deb
LAD
3227 chan->sent++;
3228 chan->conn->sent++;
6ed58ec5
VT
3229 }
3230 }
73d80deb 3231
6ed58ec5
VT
3232 if (hdev->le_pkts)
3233 hdev->le_cnt = cnt;
3234 else
3235 hdev->acl_cnt = cnt;
02b20f0b
LAD
3236
3237 if (cnt != tmp)
3238 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
3239}
3240
3eff45ea 3241static void hci_tx_work(struct work_struct *work)
1da177e4 3242{
3eff45ea 3243 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
3244 struct sk_buff *skb;
3245
6ed58ec5 3246 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 3247 hdev->sco_cnt, hdev->le_cnt);
1da177e4
LT
3248
3249 /* Schedule queues and send stuff to HCI driver */
3250
3251 hci_sched_acl(hdev);
3252
3253 hci_sched_sco(hdev);
3254
b6a0dc82
MH
3255 hci_sched_esco(hdev);
3256
6ed58ec5
VT
3257 hci_sched_le(hdev);
3258
1da177e4
LT
3259 /* Send next queued raw (unknown type) packet */
3260 while ((skb = skb_dequeue(&hdev->raw_q)))
3261 hci_send_frame(skb);
1da177e4
LT
3262}
3263
25985edc 3264/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
3265
3266/* ACL data packet */
6039aa73 3267static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
3268{
3269 struct hci_acl_hdr *hdr = (void *) skb->data;
3270 struct hci_conn *conn;
3271 __u16 handle, flags;
3272
3273 skb_pull(skb, HCI_ACL_HDR_SIZE);
3274
3275 handle = __le16_to_cpu(hdr->handle);
3276 flags = hci_flags(handle);
3277 handle = hci_handle(handle);
3278
f0e09510 3279 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 3280 handle, flags);
1da177e4
LT
3281
3282 hdev->stat.acl_rx++;
3283
3284 hci_dev_lock(hdev);
3285 conn = hci_conn_hash_lookup_handle(hdev, handle);
3286 hci_dev_unlock(hdev);
8e87d142 3287
1da177e4 3288 if (conn) {
65983fc7 3289 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 3290
1da177e4 3291 /* Send to upper protocol */
686ebf28
UF
3292 l2cap_recv_acldata(conn, skb, flags);
3293 return;
1da177e4 3294 } else {
8e87d142 3295 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 3296 hdev->name, handle);
1da177e4
LT
3297 }
3298
3299 kfree_skb(skb);
3300}
3301
3302/* SCO data packet */
6039aa73 3303static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
3304{
3305 struct hci_sco_hdr *hdr = (void *) skb->data;
3306 struct hci_conn *conn;
3307 __u16 handle;
3308
3309 skb_pull(skb, HCI_SCO_HDR_SIZE);
3310
3311 handle = __le16_to_cpu(hdr->handle);
3312
f0e09510 3313 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
3314
3315 hdev->stat.sco_rx++;
3316
3317 hci_dev_lock(hdev);
3318 conn = hci_conn_hash_lookup_handle(hdev, handle);
3319 hci_dev_unlock(hdev);
3320
3321 if (conn) {
1da177e4 3322 /* Send to upper protocol */
686ebf28
UF
3323 sco_recv_scodata(conn, skb);
3324 return;
1da177e4 3325 } else {
8e87d142 3326 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 3327 hdev->name, handle);
1da177e4
LT
3328 }
3329
3330 kfree_skb(skb);
3331}
3332
9238f36a
JH
3333static bool hci_req_is_complete(struct hci_dev *hdev)
3334{
3335 struct sk_buff *skb;
3336
3337 skb = skb_peek(&hdev->cmd_q);
3338 if (!skb)
3339 return true;
3340
3341 return bt_cb(skb)->req.start;
3342}
3343
42c6b129
JH
3344static void hci_resend_last(struct hci_dev *hdev)
3345{
3346 struct hci_command_hdr *sent;
3347 struct sk_buff *skb;
3348 u16 opcode;
3349
3350 if (!hdev->sent_cmd)
3351 return;
3352
3353 sent = (void *) hdev->sent_cmd->data;
3354 opcode = __le16_to_cpu(sent->opcode);
3355 if (opcode == HCI_OP_RESET)
3356 return;
3357
3358 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3359 if (!skb)
3360 return;
3361
3362 skb_queue_head(&hdev->cmd_q, skb);
3363 queue_work(hdev->workqueue, &hdev->cmd_work);
3364}
3365
/* Called when a command identified by @opcode has completed with
 * @status. Decides whether the request the command belonged to is now
 * finished and, if so, invokes the request's completion callback and
 * discards any remaining commands of that request from the queue.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;
		if (req_complete)
			goto call_complete;
	}

	/* Remove all pending commands belonging to this request.
	 * Commands are dequeued until the start marker of the next
	 * request is seen; that command is put back at the head so the
	 * next request stays intact.
	 */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
3423
b78752cc 3424static void hci_rx_work(struct work_struct *work)
1da177e4 3425{
b78752cc 3426 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
3427 struct sk_buff *skb;
3428
3429 BT_DBG("%s", hdev->name);
3430
1da177e4 3431 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
3432 /* Send copy to monitor */
3433 hci_send_to_monitor(hdev, skb);
3434
1da177e4
LT
3435 if (atomic_read(&hdev->promisc)) {
3436 /* Send copy to the sockets */
470fe1b5 3437 hci_send_to_sock(hdev, skb);
1da177e4
LT
3438 }
3439
3440 if (test_bit(HCI_RAW, &hdev->flags)) {
3441 kfree_skb(skb);
3442 continue;
3443 }
3444
3445 if (test_bit(HCI_INIT, &hdev->flags)) {
3446 /* Don't process data packets in this states. */
0d48d939 3447 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
3448 case HCI_ACLDATA_PKT:
3449 case HCI_SCODATA_PKT:
3450 kfree_skb(skb);
3451 continue;
3ff50b79 3452 }
1da177e4
LT
3453 }
3454
3455 /* Process frame */
0d48d939 3456 switch (bt_cb(skb)->pkt_type) {
1da177e4 3457 case HCI_EVENT_PKT:
b78752cc 3458 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
3459 hci_event_packet(hdev, skb);
3460 break;
3461
3462 case HCI_ACLDATA_PKT:
3463 BT_DBG("%s ACL data packet", hdev->name);
3464 hci_acldata_packet(hdev, skb);
3465 break;
3466
3467 case HCI_SCODATA_PKT:
3468 BT_DBG("%s SCO data packet", hdev->name);
3469 hci_scodata_packet(hdev, skb);
3470 break;
3471
3472 default:
3473 kfree_skb(skb);
3474 break;
3475 }
3476 }
1da177e4
LT
3477}
3478
c347b765 3479static void hci_cmd_work(struct work_struct *work)
1da177e4 3480{
c347b765 3481 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
3482 struct sk_buff *skb;
3483
2104786b
AE
3484 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3485 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 3486
1da177e4 3487 /* Send queued commands */
5a08ecce
AE
3488 if (atomic_read(&hdev->cmd_cnt)) {
3489 skb = skb_dequeue(&hdev->cmd_q);
3490 if (!skb)
3491 return;
3492
7585b97a 3493 kfree_skb(hdev->sent_cmd);
1da177e4 3494
70f23020
AE
3495 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
3496 if (hdev->sent_cmd) {
1da177e4
LT
3497 atomic_dec(&hdev->cmd_cnt);
3498 hci_send_frame(skb);
7bdb8a5c
SJ
3499 if (test_bit(HCI_RESET, &hdev->flags))
3500 del_timer(&hdev->cmd_timer);
3501 else
3502 mod_timer(&hdev->cmd_timer,
5f246e89 3503 jiffies + HCI_CMD_TIMEOUT);
1da177e4
LT
3504 } else {
3505 skb_queue_head(&hdev->cmd_q, skb);
c347b765 3506 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
3507 }
3508 }
3509}
2519a1fc 3510
31f7956c
AG
3511u8 bdaddr_to_le(u8 bdaddr_type)
3512{
3513 switch (bdaddr_type) {
3514 case BDADDR_LE_PUBLIC:
3515 return ADDR_LE_DEV_PUBLIC;
3516
3517 default:
3518 /* Fallback to LE Random address type */
3519 return ADDR_LE_DEV_RANDOM;
3520 }
3521}
This page took 0.954068 seconds and 5 git commands to generate.