Bluetooth: Make mgmt_discovering() return void
[deliverable/linux.git] / net / bluetooth / hci_core.c
CommitLineData
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
27
8c520a59 28#include <linux/export.h>
3df92b31 29#include <linux/idr.h>
1da177e4 30
8c520a59 31#include <linux/rfkill.h>
1da177e4
LT
32
33#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h>
35
b78752cc 36static void hci_rx_work(struct work_struct *work);
c347b765 37static void hci_cmd_work(struct work_struct *work);
3eff45ea 38static void hci_tx_work(struct work_struct *work);
1da177e4 39
1da177e4
LT
40/* HCI device list */
41LIST_HEAD(hci_dev_list);
42DEFINE_RWLOCK(hci_dev_list_lock);
43
44/* HCI callback list */
45LIST_HEAD(hci_cb_list);
46DEFINE_RWLOCK(hci_cb_list_lock);
47
3df92b31
SL
48/* HCI ID Numbering */
49static DEFINE_IDA(hci_index_ida);
50
1da177e4
LT
51/* ---- HCI notifications ---- */
52
/* Forward an HCI device event to the HCI socket layer so that
 * monitoring sockets get notified of device state changes.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
57
58/* ---- HCI requests ---- */
59
/* Completion callback for synchronous HCI requests: record the result
 * and wake up the thread sleeping in __hci_req_sync()/__hci_cmd_sync_ev().
 * Only acts if a request is actually pending.
 */
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
70
/* Cancel a pending synchronous request with the given error and wake
 * up the waiter. Counterpart of hci_req_sync_complete() for the
 * abort path (e.g. device going down).
 */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
81
/* Take ownership of the last received event (hdev->recv_evt) and return
 * it if it matches: either the explicitly requested @event, or a
 * Command Complete event for @opcode when @event is zero.
 *
 * On any mismatch or malformed event the skb is freed and
 * ERR_PTR(-ENODATA) is returned. On success the caller owns the
 * returned skb (its header has already been pulled).
 */
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	/* Detach recv_evt under the device lock so a concurrent RX path
	 * cannot hand us a half-updated pointer.
	 */
	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Caller asked for a specific event: accept only that one */
	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
136
7b1abbbe 137struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
07dc93dd 138 const void *param, u8 event, u32 timeout)
75e84b7c
JH
139{
140 DECLARE_WAITQUEUE(wait, current);
141 struct hci_request req;
142 int err = 0;
143
144 BT_DBG("%s", hdev->name);
145
146 hci_req_init(&req, hdev);
147
7b1abbbe 148 hci_req_add_ev(&req, opcode, plen, param, event);
75e84b7c
JH
149
150 hdev->req_status = HCI_REQ_PEND;
151
152 err = hci_req_run(&req, hci_req_sync_complete);
153 if (err < 0)
154 return ERR_PTR(err);
155
156 add_wait_queue(&hdev->req_wait_q, &wait);
157 set_current_state(TASK_INTERRUPTIBLE);
158
159 schedule_timeout(timeout);
160
161 remove_wait_queue(&hdev->req_wait_q, &wait);
162
163 if (signal_pending(current))
164 return ERR_PTR(-EINTR);
165
166 switch (hdev->req_status) {
167 case HCI_REQ_DONE:
168 err = -bt_to_errno(hdev->req_result);
169 break;
170
171 case HCI_REQ_CANCELED:
172 err = -hdev->req_result;
173 break;
174
175 default:
176 err = -ETIMEDOUT;
177 break;
178 }
179
180 hdev->req_status = hdev->req_result = 0;
181
182 BT_DBG("%s end: err %d", hdev->name, err);
183
184 if (err < 0)
185 return ERR_PTR(err);
186
7b1abbbe
JH
187 return hci_get_cmd_complete(hdev, opcode, event);
188}
189EXPORT_SYMBOL(__hci_cmd_sync_ev);
190
/* Convenience wrapper around __hci_cmd_sync_ev() for the common case
 * where the expected response is a Command Complete event (event == 0).
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
197
1da177e4 198/* Execute request and wait for completion. */
01178cd4 199static int __hci_req_sync(struct hci_dev *hdev,
42c6b129
JH
200 void (*func)(struct hci_request *req,
201 unsigned long opt),
01178cd4 202 unsigned long opt, __u32 timeout)
1da177e4 203{
42c6b129 204 struct hci_request req;
1da177e4
LT
205 DECLARE_WAITQUEUE(wait, current);
206 int err = 0;
207
208 BT_DBG("%s start", hdev->name);
209
42c6b129
JH
210 hci_req_init(&req, hdev);
211
1da177e4
LT
212 hdev->req_status = HCI_REQ_PEND;
213
42c6b129 214 func(&req, opt);
53cce22d 215
42c6b129
JH
216 err = hci_req_run(&req, hci_req_sync_complete);
217 if (err < 0) {
53cce22d 218 hdev->req_status = 0;
920c8300
AG
219
220 /* ENODATA means the HCI request command queue is empty.
221 * This can happen when a request with conditionals doesn't
222 * trigger any commands to be sent. This is normal behavior
223 * and should not trigger an error return.
42c6b129 224 */
920c8300
AG
225 if (err == -ENODATA)
226 return 0;
227
228 return err;
53cce22d
JH
229 }
230
bc4445c7
AG
231 add_wait_queue(&hdev->req_wait_q, &wait);
232 set_current_state(TASK_INTERRUPTIBLE);
233
1da177e4
LT
234 schedule_timeout(timeout);
235
236 remove_wait_queue(&hdev->req_wait_q, &wait);
237
238 if (signal_pending(current))
239 return -EINTR;
240
241 switch (hdev->req_status) {
242 case HCI_REQ_DONE:
e175072f 243 err = -bt_to_errno(hdev->req_result);
1da177e4
LT
244 break;
245
246 case HCI_REQ_CANCELED:
247 err = -hdev->req_result;
248 break;
249
250 default:
251 err = -ETIMEDOUT;
252 break;
3ff50b79 253 }
1da177e4 254
a5040efa 255 hdev->req_status = hdev->req_result = 0;
1da177e4
LT
256
257 BT_DBG("%s end: err %d", hdev->name, err);
258
259 return err;
260}
261
/* Public entry point for synchronous requests: refuses if the device
 * is not up, then serializes against other requests via hci_req_lock
 * before delegating to __hci_req_sync().
 */
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
279
42c6b129 280static void hci_reset_req(struct hci_request *req, unsigned long opt)
1da177e4 281{
42c6b129 282 BT_DBG("%s %ld", req->hdev->name, opt);
1da177e4
LT
283
284 /* Reset device */
42c6b129
JH
285 set_bit(HCI_RESET, &req->hdev->flags);
286 hci_req_add(req, HCI_OP_RESET, 0, NULL);
1da177e4
LT
287}
288
/* Stage-1 init for BR/EDR controllers: select packet-based flow
 * control and queue the basic identity reads.
 */
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
302
/* Stage-1 init for AMP controllers: select block-based flow control
 * and queue the AMP-specific capability reads.
 */
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}
316
/* First init stage, common to all controller types: optionally reset
 * the controller, then dispatch to the type-specific init routine.
 */
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
341
/* Stage-2 setup for BR/EDR capable controllers: queue the standard
 * parameter reads, clear event filters and set the connection accept
 * timeout.
 */
static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 slots) */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Read page scan parameters; the commands only exist on
	 * controllers newer than Bluetooth 1.1.
	 */
	if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
373
/* Stage-2 setup for LE capable controllers: queue the LE parameter
 * reads and implicitly enable LE on single-mode controllers.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
397
398static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
399{
400 if (lmp_ext_inq_capable(hdev))
401 return 0x02;
402
403 if (lmp_inq_rssi_capable(hdev))
404 return 0x01;
405
406 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
407 hdev->lmp_subver == 0x0757)
408 return 0x01;
409
410 if (hdev->manufacturer == 15) {
411 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
412 return 0x01;
413 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
414 return 0x01;
415 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
416 return 0x01;
417 }
418
419 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
420 hdev->lmp_subver == 0x1805)
421 return 0x01;
422
423 return 0x00;
424}
425
/* Queue a Write Inquiry Mode command with the best mode the
 * controller supports (see hci_get_inquiry_mode()).
 */
static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}
434
/* Build and queue the Set Event Mask command (and the LE event mask
 * when LE is supported), enabling exactly the events matching the
 * controller's advertised capabilities.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	/* LE controllers additionally get the LE event mask */
	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
515
/* Second init stage (BR/EDR-type controllers only): per-transport
 * setup, event mask, and capability-dependent configuration such as
 * SSP/EIR, inquiry mode and extended features.
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			/* SSP disabled: clear any stale EIR data on the
			 * controller by writing an all-zero EIR record.
			 */
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
571
42c6b129 572static void hci_setup_link_policy(struct hci_request *req)
2177bab5 573{
42c6b129 574 struct hci_dev *hdev = req->hdev;
2177bab5
JH
575 struct hci_cp_write_def_link_policy cp;
576 u16 link_policy = 0;
577
578 if (lmp_rswitch_capable(hdev))
579 link_policy |= HCI_LP_RSWITCH;
580 if (lmp_hold_capable(hdev))
581 link_policy |= HCI_LP_HOLD;
582 if (lmp_sniff_capable(hdev))
583 link_policy |= HCI_LP_SNIFF;
584 if (lmp_park_capable(hdev))
585 link_policy |= HCI_LP_PARK;
586
587 cp.policy = cpu_to_le16(link_policy);
42c6b129 588 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
2177bab5
JH
589}
590
/* Sync the controller's LE Host Supported setting with the host-side
 * HCI_LE_ENABLED flag, sending the write only when the value actually
 * needs to change. LE-only controllers are skipped.
 */
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
611
/* Build and queue the second page of the event mask, enabling the
 * Connectionless Slave Broadcast events matching the roles the
 * controller supports (features page 2, byte 0).
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x01) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x02) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
639
/* Third init stage: stored-link-key cleanup, link policy, LE support
 * and reading any extended feature pages beyond page 1.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		hci_set_le_support(req);
		hci_update_ad(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
680
/* Fourth init stage: optional features that depend on information
 * gathered in the earlier stages (supported commands / feature pages).
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (hdev->features[2][0] & 0x04)
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}
693
/* Run the staged controller initialization. Stage 1 applies to every
 * controller; AMP controllers stop there, while BR/EDR-type (including
 * LE and dual-mode) controllers continue through stages 2-4.
 * Returns 0 on success or a negative errno from the failing stage.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
}
719
42c6b129 720static void hci_scan_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
721{
722 __u8 scan = opt;
723
42c6b129 724 BT_DBG("%s %x", req->hdev->name, scan);
1da177e4
LT
725
726 /* Inquiry and Page scans */
42c6b129 727 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
728}
729
42c6b129 730static void hci_auth_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
731{
732 __u8 auth = opt;
733
42c6b129 734 BT_DBG("%s %x", req->hdev->name, auth);
1da177e4
LT
735
736 /* Authentication */
42c6b129 737 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
738}
739
42c6b129 740static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
741{
742 __u8 encrypt = opt;
743
42c6b129 744 BT_DBG("%s %x", req->hdev->name, encrypt);
1da177e4 745
e4e8e37c 746 /* Encryption */
42c6b129 747 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
748}
749
42c6b129 750static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
e4e8e37c
MH
751{
752 __le16 policy = cpu_to_le16(opt);
753
42c6b129 754 BT_DBG("%s %x", req->hdev->name, policy);
e4e8e37c
MH
755
756 /* Default link policy */
42c6b129 757 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
e4e8e37c
MH
758}
759
/* Get HCI device by index.
 * Device is held on return: the caller must drop the reference with
 * hci_dev_put() when done. Returns NULL if no device has this index.
 */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			/* Take the reference while still under the lock */
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
1da177e4
LT
781
782/* ---- Inquiry support ---- */
ff9ef578 783
30dc78e1
JH
784bool hci_discovery_active(struct hci_dev *hdev)
785{
786 struct discovery_state *discov = &hdev->discovery;
787
6fbe195d 788 switch (discov->state) {
343f935b 789 case DISCOVERY_FINDING:
6fbe195d 790 case DISCOVERY_RESOLVING:
30dc78e1
JH
791 return true;
792
6fbe195d
AG
793 default:
794 return false;
795 }
30dc78e1
JH
796}
797
/* Transition the discovery state machine and emit the corresponding
 * mgmt "discovering" events: started when entering FINDING, stopped
 * when entering STOPPED (unless we never got past STARTING).
 * No-op if the state is unchanged.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* Don't signal "stopped" for a discovery that never
		 * actually started.
		 */
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
823
/* Free every entry in the inquiry cache and reset the unknown/resolve
 * lists. Entries live on the "all" list; the other lists only alias
 * them, so freeing via "all" covers everything.
 */
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}
837
a8c5fb1a
GP
838struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
839 bdaddr_t *bdaddr)
1da177e4 840{
30883512 841 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
842 struct inquiry_entry *e;
843
6ed93dc6 844 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 845
561aafbc
JH
846 list_for_each_entry(e, &cache->all, all) {
847 if (!bacmp(&e->data.bdaddr, bdaddr))
848 return e;
849 }
850
851 return NULL;
852}
853
854struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 855 bdaddr_t *bdaddr)
561aafbc 856{
30883512 857 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
858 struct inquiry_entry *e;
859
6ed93dc6 860 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
861
862 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 863 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
864 return e;
865 }
866
867 return NULL;
1da177e4
LT
868}
869
/* Find an entry on the name-resolve list. With BDADDR_ANY, return the
 * first entry whose name_state matches @state; otherwise match the
 * exact address. Returns NULL when nothing matches.
 */
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
888
/* Re-position @ie on the resolve list after its RSSI changed, keeping
 * the list ordered: entries whose name resolution is already pending
 * stay in front, the rest are sorted by descending |RSSI| so stronger
 * signals get their names resolved first.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	/* Walk until we find the first entry that should come after us */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
907
/* Insert or refresh an inquiry cache entry for @data.
 *
 * @name_known: whether the remote name came with this result.
 * @ssp: if non-NULL, set to true when the remote indicated SSP support
 *       (either in this result or in the cached entry).
 *
 * Returns true when the entry's name is (now) known, false when the
 * name is still unknown or allocation of a new entry failed.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* A fresh inquiry result invalidates any stored OOB data */
	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* RSSI changed for an entry awaiting name resolution:
		 * re-sort it on the resolve list.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just became known: drop the entry from the unknown/resolve
	 * list it was queued on.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
965
/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info. Returns the number of entries written.
 * Caller must hold the device lock and size @buf accordingly.
 */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
993
/* Request builder: start an inquiry with the LAP/length/num_rsp
 * parameters packed into @opt (a struct hci_inquiry_req pointer).
 * Does nothing if an inquiry is already running.
 */
static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
1011
/* wait_on_bit() action: sleep until woken, reporting whether a signal
 * interrupted the wait (non-zero aborts the bit-wait).
 */
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}
1017
1da177e4
LT
1018int hci_inquiry(void __user *arg)
1019{
1020 __u8 __user *ptr = arg;
1021 struct hci_inquiry_req ir;
1022 struct hci_dev *hdev;
1023 int err = 0, do_inquiry = 0, max_rsp;
1024 long timeo;
1025 __u8 *buf;
1026
1027 if (copy_from_user(&ir, ptr, sizeof(ir)))
1028 return -EFAULT;
1029
5a08ecce
AE
1030 hdev = hci_dev_get(ir.dev_id);
1031 if (!hdev)
1da177e4
LT
1032 return -ENODEV;
1033
0736cfa8
MH
1034 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1035 err = -EBUSY;
1036 goto done;
1037 }
1038
56f87901
JH
1039 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1040 err = -EOPNOTSUPP;
1041 goto done;
1042 }
1043
09fd0de5 1044 hci_dev_lock(hdev);
8e87d142 1045 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 1046 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 1047 hci_inquiry_cache_flush(hdev);
1da177e4
LT
1048 do_inquiry = 1;
1049 }
09fd0de5 1050 hci_dev_unlock(hdev);
1da177e4 1051
04837f64 1052 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
1053
1054 if (do_inquiry) {
01178cd4
JH
1055 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1056 timeo);
70f23020
AE
1057 if (err < 0)
1058 goto done;
3e13fa1e
AG
1059
1060 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1061 * cleared). If it is interrupted by a signal, return -EINTR.
1062 */
1063 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1064 TASK_INTERRUPTIBLE))
1065 return -EINTR;
70f23020 1066 }
1da177e4 1067
8fc9ced3
GP
1068 /* for unlimited number of responses we will use buffer with
1069 * 255 entries
1070 */
1da177e4
LT
1071 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1072
1073 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1074 * copy it to the user space.
1075 */
01df8c31 1076 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 1077 if (!buf) {
1da177e4
LT
1078 err = -ENOMEM;
1079 goto done;
1080 }
1081
09fd0de5 1082 hci_dev_lock(hdev);
1da177e4 1083 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 1084 hci_dev_unlock(hdev);
1da177e4
LT
1085
1086 BT_DBG("num_rsp %d", ir.num_rsp);
1087
1088 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1089 ptr += sizeof(ir);
1090 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 1091 ir.num_rsp))
1da177e4 1092 err = -EFAULT;
8e87d142 1093 } else
1da177e4
LT
1094 err = -EFAULT;
1095
1096 kfree(buf);
1097
1098done:
1099 hci_dev_put(hdev);
1100 return err;
1101}
1102
3f0f524b
JH
1103static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
1104{
1105 u8 ad_len = 0, flags = 0;
1106 size_t name_len;
1107
f3d3444a 1108 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
3f0f524b
JH
1109 flags |= LE_AD_GENERAL;
1110
11802b29
JH
1111 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1112 if (lmp_le_br_capable(hdev))
1113 flags |= LE_AD_SIM_LE_BREDR_CTRL;
1114 if (lmp_host_le_br_capable(hdev))
1115 flags |= LE_AD_SIM_LE_BREDR_HOST;
1116 } else {
3f0f524b 1117 flags |= LE_AD_NO_BREDR;
11802b29 1118 }
3f0f524b
JH
1119
1120 if (flags) {
1121 BT_DBG("adv flags 0x%02x", flags);
1122
1123 ptr[0] = 2;
1124 ptr[1] = EIR_FLAGS;
1125 ptr[2] = flags;
1126
1127 ad_len += 3;
1128 ptr += 3;
1129 }
1130
1131 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
1132 ptr[0] = 2;
1133 ptr[1] = EIR_TX_POWER;
1134 ptr[2] = (u8) hdev->adv_tx_power;
1135
1136 ad_len += 3;
1137 ptr += 3;
1138 }
1139
1140 name_len = strlen(hdev->dev_name);
1141 if (name_len > 0) {
1142 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
1143
1144 if (name_len > max_len) {
1145 name_len = max_len;
1146 ptr[1] = EIR_NAME_SHORT;
1147 } else
1148 ptr[1] = EIR_NAME_COMPLETE;
1149
1150 ptr[0] = name_len + 1;
1151
1152 memcpy(ptr + 2, hdev->dev_name, name_len);
1153
1154 ad_len += (name_len + 2);
1155 ptr += (name_len + 2);
1156 }
1157
1158 return ad_len;
1159}
1160
04b4edcb 1161void hci_update_ad(struct hci_request *req)
3f0f524b 1162{
04b4edcb 1163 struct hci_dev *hdev = req->hdev;
3f0f524b
JH
1164 struct hci_cp_le_set_adv_data cp;
1165 u8 len;
3f0f524b 1166
04b4edcb
JH
1167 if (!lmp_le_capable(hdev))
1168 return;
3f0f524b
JH
1169
1170 memset(&cp, 0, sizeof(cp));
1171
1172 len = create_ad(hdev, cp.data);
1173
1174 if (hdev->adv_data_len == len &&
04b4edcb
JH
1175 memcmp(cp.data, hdev->adv_data, len) == 0)
1176 return;
3f0f524b
JH
1177
1178 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1179 hdev->adv_data_len = len;
1180
1181 cp.length = len;
3f0f524b 1182
04b4edcb 1183 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
3f0f524b
JH
1184}
1185
/* Power on and initialize an HCI device. Called with no locks held;
 * takes the request lock for the whole sequence. Returns 0 on success
 * or a negative errno. On init failure the transport is closed again
 * and all queues/works are flushed so the device is left fully down.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random adddress, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Open the transport (USB/UART/...) first. */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	/* One command credit so the first HCI command can go out. */
	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	/* Driver-specific setup runs only once, during the setup phase. */
	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		/* Full HCI init is skipped for raw and user-channel
		 * devices; user space drives those directly.
		 */
		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* Tell mgmt only for regular BR/EDR devices that are
		 * past setup and not claimed by a user channel.
		 */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
1287
cbed0ca1
JH
1288/* ---- HCI ioctl helpers ---- */
1289
/* HCIDEVUP ioctl entry point: resolve the device id, make sure no
 * power on/off work races with us, then open the device. Returns 0 or
 * a negative errno from hci_dev_do_open().
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}
1319
1da177e4
LT
/* Power down an HCI device: cancel pending works, flush caches and
 * connections, optionally reset the controller, drain all queues and
 * close the transport. The teardown order matters: works are flushed
 * before queues are purged so nothing re-queues behind us. Always
 * returns 0.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Device already down: just stop the command timer and leave. */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	/* If this close was not triggered by the auto-off timer, let
	 * mgmt know the BR/EDR controller is now powered off.
	 */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    hdev->dev_type == HCI_BREDR) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	/* Drop the reference taken in hci_dev_do_open(). */
	hci_dev_put(hdev);
	return 0;
}
1414
1415int hci_dev_close(__u16 dev)
1416{
1417 struct hci_dev *hdev;
1418 int err;
1419
70f23020
AE
1420 hdev = hci_dev_get(dev);
1421 if (!hdev)
1da177e4 1422 return -ENODEV;
8ee56540 1423
0736cfa8
MH
1424 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1425 err = -EBUSY;
1426 goto done;
1427 }
1428
8ee56540
MH
1429 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1430 cancel_delayed_work(&hdev->power_off);
1431
1da177e4 1432 err = hci_dev_do_close(hdev);
8ee56540 1433
0736cfa8 1434done:
1da177e4
LT
1435 hci_dev_put(hdev);
1436 return err;
1437}
1438
/* HCIDEVRESET ioctl handler: flush queues, caches and connections and
 * (for non-raw devices) issue an HCI Reset. The device must be up and
 * not owned by a user channel. Returns 0 or a negative errno.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore the single command credit and clear flow-control
	 * counters for all link types.
	 */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
1483
1484int hci_dev_reset_stat(__u16 dev)
1485{
1486 struct hci_dev *hdev;
1487 int ret = 0;
1488
70f23020
AE
1489 hdev = hci_dev_get(dev);
1490 if (!hdev)
1da177e4
LT
1491 return -ENODEV;
1492
0736cfa8
MH
1493 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1494 ret = -EBUSY;
1495 goto done;
1496 }
1497
1da177e4
LT
1498 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1499
0736cfa8 1500done:
1da177e4 1501 hci_dev_put(hdev);
1da177e4
LT
1502 return ret;
1503}
1504
/* Handler for the legacy per-device HCI ioctls (HCISETAUTH, HCISETSCAN,
 * HCISETPTYPE, ...). Copies a struct hci_dev_req from user space and
 * dispatches on @cmd; most settings are applied synchronously via
 * hci_req_sync(). Returns 0 or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		/* Applied locally only; no controller command needed. */
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs MTU in the high 16 bits and the packet
		 * count in the low 16 bits.
		 */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
1590
/* HCIGETDEVLIST ioctl handler: fill a user-supplied struct
 * hci_dev_list_req with (dev_id, flags) pairs for up to dev_num
 * registered devices. Returns 0, -EFAULT, -EINVAL or -ENOMEM.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the kernel allocation stays bounded. */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Legacy ioctl access keeps the device on: cancel any
		 * pending auto power-off.
		 */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Devices not managed via mgmt default to pairable. */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Copy back only the entries actually filled in. */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1637
/* HCIGETDEVINFO ioctl handler: fill a struct hci_dev_info for the
 * requested device id and copy it back to user space. For LE-only
 * controllers the ACL fields carry the LE buffer parameters and the
 * SCO fields are zeroed. Returns 0, -EFAULT or -ENODEV.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Legacy ioctl access keeps the device powered on. */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	/* NOTE(review): unbounded strcpy — presumably hdev->name always
	 * fits di.name (both look like fixed-size kernel buffers of the
	 * same length); confirm against the struct definitions.
	 */
	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Pack bus type (low nibble) and device type into one byte. */
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1686
1687/* ---- Interface to HCI drivers ---- */
1688
611b30f7
MH
1689static int hci_rfkill_set_block(void *data, bool blocked)
1690{
1691 struct hci_dev *hdev = data;
1692
1693 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1694
0736cfa8
MH
1695 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1696 return -EBUSY;
1697
5e130367
JH
1698 if (blocked) {
1699 set_bit(HCI_RFKILLED, &hdev->dev_flags);
bf543036
JH
1700 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1701 hci_dev_do_close(hdev);
5e130367
JH
1702 } else {
1703 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
1025c04c 1704 }
611b30f7
MH
1705
1706 return 0;
1707}
1708
/* rfkill operations for HCI devices; only blocking is handled. */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1712
ab81cbf9
JH
/* Work item that powers a device on (queued e.g. at registration or by
 * mgmt). On failure the error is reported via mgmt; on success,
 * conditions ignored during setup (rfkill, missing address) are
 * re-checked and may power the device straight back off.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		/* Power back off automatically unless user space claims
		 * the device within the timeout.
		 */
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	/* First successful power-on completes setup: announce the new
	 * controller index to mgmt.
	 */
	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1744
/* Delayed work item that powers the device off (e.g. the auto-off
 * timeout queued by hci_power_on()).
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
1754
16ab91ab
JH
1755static void hci_discov_off(struct work_struct *work)
1756{
1757 struct hci_dev *hdev;
1758 u8 scan = SCAN_PAGE;
1759
1760 hdev = container_of(work, struct hci_dev, discov_off.work);
1761
1762 BT_DBG("%s", hdev->name);
1763
09fd0de5 1764 hci_dev_lock(hdev);
16ab91ab
JH
1765
1766 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1767
1768 hdev->discov_timeout = 0;
1769
09fd0de5 1770 hci_dev_unlock(hdev);
16ab91ab
JH
1771}
1772
2aeb9a1a
JH
1773int hci_uuids_clear(struct hci_dev *hdev)
1774{
4821002c 1775 struct bt_uuid *uuid, *tmp;
2aeb9a1a 1776
4821002c
JH
1777 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1778 list_del(&uuid->list);
2aeb9a1a
JH
1779 kfree(uuid);
1780 }
1781
1782 return 0;
1783}
1784
55ed8ca1
JH
1785int hci_link_keys_clear(struct hci_dev *hdev)
1786{
1787 struct list_head *p, *n;
1788
1789 list_for_each_safe(p, n, &hdev->link_keys) {
1790 struct link_key *key;
1791
1792 key = list_entry(p, struct link_key, list);
1793
1794 list_del(p);
1795 kfree(key);
1796 }
1797
1798 return 0;
1799}
1800
b899efaf
VCG
1801int hci_smp_ltks_clear(struct hci_dev *hdev)
1802{
1803 struct smp_ltk *k, *tmp;
1804
1805 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1806 list_del(&k->list);
1807 kfree(k);
1808 }
1809
1810 return 0;
1811}
1812
55ed8ca1
JH
1813struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1814{
8035ded4 1815 struct link_key *k;
55ed8ca1 1816
8035ded4 1817 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1818 if (bacmp(bdaddr, &k->bdaddr) == 0)
1819 return k;
55ed8ca1
JH
1820
1821 return NULL;
1822}
1823
745c0ce3 1824static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 1825 u8 key_type, u8 old_key_type)
d25e28ab
JH
1826{
1827 /* Legacy key */
1828 if (key_type < 0x03)
745c0ce3 1829 return true;
d25e28ab
JH
1830
1831 /* Debug keys are insecure so don't store them persistently */
1832 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 1833 return false;
d25e28ab
JH
1834
1835 /* Changed combination key and there's no previous one */
1836 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 1837 return false;
d25e28ab
JH
1838
1839 /* Security mode 3 case */
1840 if (!conn)
745c0ce3 1841 return true;
d25e28ab
JH
1842
1843 /* Neither local nor remote side had no-bonding as requirement */
1844 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 1845 return true;
d25e28ab
JH
1846
1847 /* Local side had dedicated bonding as requirement */
1848 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 1849 return true;
d25e28ab
JH
1850
1851 /* Remote side had dedicated bonding as requirement */
1852 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 1853 return true;
d25e28ab
JH
1854
1855 /* If none of the above criteria match, then don't store the key
1856 * persistently */
745c0ce3 1857 return false;
d25e28ab
JH
1858}
1859
c9839a11 1860struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
75d262c2 1861{
c9839a11 1862 struct smp_ltk *k;
75d262c2 1863
c9839a11
VCG
1864 list_for_each_entry(k, &hdev->long_term_keys, list) {
1865 if (k->ediv != ediv ||
a8c5fb1a 1866 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
1867 continue;
1868
c9839a11 1869 return k;
75d262c2
VCG
1870 }
1871
1872 return NULL;
1873}
75d262c2 1874
c9839a11 1875struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
04124681 1876 u8 addr_type)
75d262c2 1877{
c9839a11 1878 struct smp_ltk *k;
75d262c2 1879
c9839a11
VCG
1880 list_for_each_entry(k, &hdev->long_term_keys, list)
1881 if (addr_type == k->bdaddr_type &&
a8c5fb1a 1882 bacmp(bdaddr, &k->bdaddr) == 0)
75d262c2
VCG
1883 return k;
1884
1885 return NULL;
1886}
75d262c2 1887
/* Store (or update) a BR/EDR link key for @bdaddr. @new_key is non-zero
 * when the key comes from a fresh pairing (in which case mgmt is
 * notified); @conn may be NULL. Returns 0 or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	/* Reuse an existing entry for this address if there is one. */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff means "no previous key type known". */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed combination key keeps the type of the key it
	 * replaced; anything else is stored as-is.
	 */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are flushed when the connection drops. */
	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
1940
/* Store (or update) an LE long term key (or STK) for @bdaddr/@addr_type.
 * Only HCI_SMP_STK/HCI_SMP_LTK types are accepted; others are silently
 * ignored. mgmt is notified only for new LTKs. Returns 0 or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse an existing entry for this address if there is one. */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* STKs are short-lived and not reported to mgmt. */
	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1977
55ed8ca1
JH
1978int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1979{
1980 struct link_key *key;
1981
1982 key = hci_find_link_key(hdev, bdaddr);
1983 if (!key)
1984 return -ENOENT;
1985
6ed93dc6 1986 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
1987
1988 list_del(&key->list);
1989 kfree(key);
1990
1991 return 0;
1992}
1993
b899efaf
VCG
1994int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1995{
1996 struct smp_ltk *k, *tmp;
1997
1998 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1999 if (bacmp(bdaddr, &k->bdaddr))
2000 continue;
2001
6ed93dc6 2002 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf
VCG
2003
2004 list_del(&k->list);
2005 kfree(k);
2006 }
2007
2008 return 0;
2009}
2010
/* HCI command timer function: fires when the controller failed to
 * answer the last command in time. Logs the stuck opcode (if any),
 * restores the single command credit and kicks the command work so the
 * queue does not stall forever.
 */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
2028
2763eda6 2029struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 2030 bdaddr_t *bdaddr)
2763eda6
SJ
2031{
2032 struct oob_data *data;
2033
2034 list_for_each_entry(data, &hdev->remote_oob_data, list)
2035 if (bacmp(bdaddr, &data->bdaddr) == 0)
2036 return data;
2037
2038 return NULL;
2039}
2040
2041int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2042{
2043 struct oob_data *data;
2044
2045 data = hci_find_remote_oob_data(hdev, bdaddr);
2046 if (!data)
2047 return -ENOENT;
2048
6ed93dc6 2049 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
2050
2051 list_del(&data->list);
2052 kfree(data);
2053
2054 return 0;
2055}
2056
2057int hci_remote_oob_data_clear(struct hci_dev *hdev)
2058{
2059 struct oob_data *data, *n;
2060
2061 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2062 list_del(&data->list);
2063 kfree(data);
2064 }
2065
2066 return 0;
2067}
2068
2069int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
04124681 2070 u8 *randomizer)
2763eda6
SJ
2071{
2072 struct oob_data *data;
2073
2074 data = hci_find_remote_oob_data(hdev, bdaddr);
2075
2076 if (!data) {
2077 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2078 if (!data)
2079 return -ENOMEM;
2080
2081 bacpy(&data->bdaddr, bdaddr);
2082 list_add(&data->list, &hdev->remote_oob_data);
2083 }
2084
2085 memcpy(data->hash, hash, sizeof(data->hash));
2086 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2087
6ed93dc6 2088 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
2089
2090 return 0;
2091}
2092
04124681 2093struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
b2a66aad 2094{
8035ded4 2095 struct bdaddr_list *b;
b2a66aad 2096
8035ded4 2097 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
2098 if (bacmp(bdaddr, &b->bdaddr) == 0)
2099 return b;
b2a66aad
AJ
2100
2101 return NULL;
2102}
2103
2104int hci_blacklist_clear(struct hci_dev *hdev)
2105{
2106 struct list_head *p, *n;
2107
2108 list_for_each_safe(p, n, &hdev->blacklist) {
2109 struct bdaddr_list *b;
2110
2111 b = list_entry(p, struct bdaddr_list, list);
2112
2113 list_del(p);
2114 kfree(b);
2115 }
2116
2117 return 0;
2118}
2119
88c1fe4b 2120int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2121{
2122 struct bdaddr_list *entry;
b2a66aad
AJ
2123
2124 if (bacmp(bdaddr, BDADDR_ANY) == 0)
2125 return -EBADF;
2126
5e762444
AJ
2127 if (hci_blacklist_lookup(hdev, bdaddr))
2128 return -EEXIST;
b2a66aad
AJ
2129
2130 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
2131 if (!entry)
2132 return -ENOMEM;
b2a66aad
AJ
2133
2134 bacpy(&entry->bdaddr, bdaddr);
2135
2136 list_add(&entry->list, &hdev->blacklist);
2137
88c1fe4b 2138 return mgmt_device_blocked(hdev, bdaddr, type);
b2a66aad
AJ
2139}
2140
88c1fe4b 2141int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2142{
2143 struct bdaddr_list *entry;
b2a66aad 2144
1ec918ce 2145 if (bacmp(bdaddr, BDADDR_ANY) == 0)
5e762444 2146 return hci_blacklist_clear(hdev);
b2a66aad
AJ
2147
2148 entry = hci_blacklist_lookup(hdev, bdaddr);
1ec918ce 2149 if (!entry)
5e762444 2150 return -ENOENT;
b2a66aad
AJ
2151
2152 list_del(&entry->list);
2153 kfree(entry);
2154
88c1fe4b 2155 return mgmt_device_unblocked(hdev, bdaddr, type);
b2a66aad
AJ
2156}
2157
4c87eaab 2158static void inquiry_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 2159{
4c87eaab
AG
2160 if (status) {
2161 BT_ERR("Failed to start inquiry: status %d", status);
7ba8b4be 2162
4c87eaab
AG
2163 hci_dev_lock(hdev);
2164 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2165 hci_dev_unlock(hdev);
2166 return;
2167 }
7ba8b4be
AG
2168}
2169
/* Completion callback for the LE scan disable request. For LE-only
 * discovery the procedure is done; for interleaved discovery a BR/EDR
 * inquiry is started next.
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	/* NOTE(review): other discovery types (e.g. BR/EDR-only) fall
	 * through this switch untouched — presumably they never reach
	 * here because the LE scan is only armed for LE/interleaved
	 * discovery; confirm against the discovery state machine.
	 */
	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}
2212
/* Delayed work that turns off LE scanning (scheduled via
 * hdev->le_scan_disable). Completion handling — including the
 * interleaved-discovery follow-up — happens in
 * le_scan_disable_work_complete().
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}
2233
/* Alloc HCI device.
 *
 * Allocates and initializes a struct hci_dev with default packet
 * types, locks, list heads, work items and timers. Returns NULL on
 * allocation failure. The caller later registers it with
 * hci_register_dev() and releases it with hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Default packet types / link policy until the controller is read */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Sniff intervals in slots (0.625 ms units) */
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	/* Watchdog for commands the controller never answers */
	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
2287
/* Free HCI device.
 *
 * Drops the final device reference; the actual memory is freed by the
 * device-core release callback once all references are gone.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
2295
/* Register HCI device.
 *
 * Allocates an index, creates the per-device workqueues and sysfs
 * entries, hooks up rfkill, adds the device to the global list and
 * schedules the initial power-on. Returns the new index on success or
 * a negative errno; on failure all partially acquired resources are
 * released via the goto error path.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* A driver must supply at least open() and close() callbacks */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Single-threaded, high priority: RX/TX/cmd work must not be
	 * reordered against each other.
	 */
	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is optional: registration failure is not fatal */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init.
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
2388
/* Unregister HCI device.
 *
 * Reverse of hci_register_dev(): removes the device from the global
 * list, closes it, tears down mgmt state, rfkill, sysfs and
 * workqueues, clears all persistent per-device data and finally drops
 * the registration reference and releases the index. The teardown
 * order mirrors the registration order and must be preserved.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Prevent new work from being queued against this device */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled frames */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Clear persistent per-device state under the device lock */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
2447
/* Suspend HCI device.
 *
 * Only notifies registered listeners of the suspend event; drivers do
 * their own power handling. Always returns 0.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
2455
/* Resume HCI device.
 *
 * Only notifies registered listeners of the resume event. Always
 * returns 0.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
2463
/* Receive frame from HCI drivers.
 *
 * Entry point for complete frames coming up from a transport driver
 * (skb->dev carries the hci_dev). The frame is timestamped, marked as
 * incoming and queued for the RX work item. Returns -ENXIO and frees
 * the skb if the device is neither up nor initializing.
 */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Defer actual processing to the RX work item */
	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
2486
33e882a5 2487static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 2488 int count, __u8 index)
33e882a5
SS
2489{
2490 int len = 0;
2491 int hlen = 0;
2492 int remain = count;
2493 struct sk_buff *skb;
2494 struct bt_skb_cb *scb;
2495
2496 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 2497 index >= NUM_REASSEMBLY)
33e882a5
SS
2498 return -EILSEQ;
2499
2500 skb = hdev->reassembly[index];
2501
2502 if (!skb) {
2503 switch (type) {
2504 case HCI_ACLDATA_PKT:
2505 len = HCI_MAX_FRAME_SIZE;
2506 hlen = HCI_ACL_HDR_SIZE;
2507 break;
2508 case HCI_EVENT_PKT:
2509 len = HCI_MAX_EVENT_SIZE;
2510 hlen = HCI_EVENT_HDR_SIZE;
2511 break;
2512 case HCI_SCODATA_PKT:
2513 len = HCI_MAX_SCO_SIZE;
2514 hlen = HCI_SCO_HDR_SIZE;
2515 break;
2516 }
2517
1e429f38 2518 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
2519 if (!skb)
2520 return -ENOMEM;
2521
2522 scb = (void *) skb->cb;
2523 scb->expect = hlen;
2524 scb->pkt_type = type;
2525
2526 skb->dev = (void *) hdev;
2527 hdev->reassembly[index] = skb;
2528 }
2529
2530 while (count) {
2531 scb = (void *) skb->cb;
89bb46d0 2532 len = min_t(uint, scb->expect, count);
33e882a5
SS
2533
2534 memcpy(skb_put(skb, len), data, len);
2535
2536 count -= len;
2537 data += len;
2538 scb->expect -= len;
2539 remain = count;
2540
2541 switch (type) {
2542 case HCI_EVENT_PKT:
2543 if (skb->len == HCI_EVENT_HDR_SIZE) {
2544 struct hci_event_hdr *h = hci_event_hdr(skb);
2545 scb->expect = h->plen;
2546
2547 if (skb_tailroom(skb) < scb->expect) {
2548 kfree_skb(skb);
2549 hdev->reassembly[index] = NULL;
2550 return -ENOMEM;
2551 }
2552 }
2553 break;
2554
2555 case HCI_ACLDATA_PKT:
2556 if (skb->len == HCI_ACL_HDR_SIZE) {
2557 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2558 scb->expect = __le16_to_cpu(h->dlen);
2559
2560 if (skb_tailroom(skb) < scb->expect) {
2561 kfree_skb(skb);
2562 hdev->reassembly[index] = NULL;
2563 return -ENOMEM;
2564 }
2565 }
2566 break;
2567
2568 case HCI_SCODATA_PKT:
2569 if (skb->len == HCI_SCO_HDR_SIZE) {
2570 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2571 scb->expect = h->dlen;
2572
2573 if (skb_tailroom(skb) < scb->expect) {
2574 kfree_skb(skb);
2575 hdev->reassembly[index] = NULL;
2576 return -ENOMEM;
2577 }
2578 }
2579 break;
2580 }
2581
2582 if (scb->expect == 0) {
2583 /* Complete frame */
2584
2585 bt_cb(skb)->pkt_type = type;
2586 hci_recv_frame(skb);
2587
2588 hdev->reassembly[index] = NULL;
2589 return remain;
2590 }
2591 }
2592
2593 return remain;
2594}
2595
/* Feed a driver-provided fragment of a typed HCI packet into the
 * per-type reassembly buffer (slot type - 1).
 *
 * Returns the number of unconsumed bytes or a negative errno from
 * hci_reassembly().
 */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	/* hci_reassembly() consumes up to one frame per call; loop
	 * until all input is used or an error occurs.
	 */
	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
2615
/* Reassembly slot used for byte-stream transports (e.g. UART), where
 * the packet type indicator is in-band as the first byte of each
 * frame.
 */
#define STREAM_REASSEMBLY 0

/* Feed raw stream bytes into the stream reassembly slot. The first
 * byte of each new frame selects the packet type; continuation bytes
 * inherit the type of the frame in progress.
 *
 * Returns the number of unconsumed bytes or a negative errno from
 * hci_reassembly().
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2650
1da177e4
LT
2651/* ---- Interface to upper protocols ---- */
2652
/* ---- Interface to upper protocols ---- */

/* Register an upper-protocol callback structure on the global
 * hci_cb_list. Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
2664
/* Remove a previously registered upper-protocol callback structure
 * from the global hci_cb_list. Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2676
/* Hand a fully formed frame to the transport driver.
 *
 * Timestamps the skb, mirrors it to the monitor channel (and to raw
 * sockets when in promiscuous mode), then calls the driver's send()
 * hook. Returns -ENODEV if the skb carries no device, otherwise the
 * driver's return value.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2704
/* Initialize an asynchronous HCI request: empty command queue, bound
 * to @hdev, with no accumulated build error yet.
 */
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}
2711
/* Submit a built request for execution.
 *
 * @complete is invoked when the last command in the request finishes.
 * Returns a deferred build error from hci_req_add*() if one occurred
 * (discarding the queued commands), -ENODATA for an empty request,
 * otherwise 0 after splicing the commands onto the device command
 * queue and kicking the command work item.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occured during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	/* The completion callback rides on the last command's skb */
	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	/* Splice atomically so the request's commands stay contiguous */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
2743
1ca3a9d0 2744static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
07dc93dd 2745 u32 plen, const void *param)
1da177e4
LT
2746{
2747 int len = HCI_COMMAND_HDR_SIZE + plen;
2748 struct hci_command_hdr *hdr;
2749 struct sk_buff *skb;
2750
1da177e4 2751 skb = bt_skb_alloc(len, GFP_ATOMIC);
1ca3a9d0
JH
2752 if (!skb)
2753 return NULL;
1da177e4
LT
2754
2755 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 2756 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
2757 hdr->plen = plen;
2758
2759 if (plen)
2760 memcpy(skb_put(skb, plen), param, plen);
2761
2762 BT_DBG("skb len %d", skb->len);
2763
0d48d939 2764 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1da177e4 2765 skb->dev = (void *) hdev;
c78ae283 2766
1ca3a9d0
JH
2767 return skb;
2768}
2769
/* Send HCI command.
 *
 * Builds a stand-alone command (not part of an hci_request), queues
 * it on the device command queue and kicks the command work item.
 * Returns 0 on success or -ENOMEM if the skb could not be allocated.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flaged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
1da177e4 2794
71c76a17 2795/* Queue a command to an asynchronous HCI request */
07dc93dd
JH
2796void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2797 const void *param, u8 event)
71c76a17
JH
2798{
2799 struct hci_dev *hdev = req->hdev;
2800 struct sk_buff *skb;
2801
2802 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2803
34739c1e
AG
2804 /* If an error occured during request building, there is no point in
2805 * queueing the HCI command. We can simply return.
2806 */
2807 if (req->err)
2808 return;
2809
71c76a17
JH
2810 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2811 if (!skb) {
5d73e034
AG
2812 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2813 hdev->name, opcode);
2814 req->err = -ENOMEM;
e348fe6b 2815 return;
71c76a17
JH
2816 }
2817
2818 if (skb_queue_empty(&req->cmd_q))
2819 bt_cb(skb)->req.start = true;
2820
02350a72
JH
2821 bt_cb(skb)->req.event = event;
2822
71c76a17 2823 skb_queue_tail(&req->cmd_q, skb);
71c76a17
JH
2824}
2825
/* Queue a command to an asynchronous HCI request, completed by the
 * default Command Complete / Command Status event (event == 0).
 */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
2831
1da177e4 2832/* Get data from the previously sent command */
a9de9248 2833void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
2834{
2835 struct hci_command_hdr *hdr;
2836
2837 if (!hdev->sent_cmd)
2838 return NULL;
2839
2840 hdr = (void *) hdev->sent_cmd->data;
2841
a9de9248 2842 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
2843 return NULL;
2844
f0e09510 2845 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
2846
2847 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2848}
2849
/* Send ACL data */

/* Prepend an ACL data header (handle + packet-boundary flags, data
 * length) to @skb in place.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2862
ee22be7e 2863static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 2864 struct sk_buff *skb, __u16 flags)
1da177e4 2865{
ee22be7e 2866 struct hci_conn *conn = chan->conn;
1da177e4
LT
2867 struct hci_dev *hdev = conn->hdev;
2868 struct sk_buff *list;
2869
087bfd99
GP
2870 skb->len = skb_headlen(skb);
2871 skb->data_len = 0;
2872
2873 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
2874
2875 switch (hdev->dev_type) {
2876 case HCI_BREDR:
2877 hci_add_acl_hdr(skb, conn->handle, flags);
2878 break;
2879 case HCI_AMP:
2880 hci_add_acl_hdr(skb, chan->handle, flags);
2881 break;
2882 default:
2883 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2884 return;
2885 }
087bfd99 2886
70f23020
AE
2887 list = skb_shinfo(skb)->frag_list;
2888 if (!list) {
1da177e4
LT
2889 /* Non fragmented */
2890 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2891
73d80deb 2892 skb_queue_tail(queue, skb);
1da177e4
LT
2893 } else {
2894 /* Fragmented */
2895 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2896
2897 skb_shinfo(skb)->frag_list = NULL;
2898
2899 /* Queue all fragments atomically */
af3e6359 2900 spin_lock(&queue->lock);
1da177e4 2901
73d80deb 2902 __skb_queue_tail(queue, skb);
e702112f
AE
2903
2904 flags &= ~ACL_START;
2905 flags |= ACL_CONT;
1da177e4
LT
2906 do {
2907 skb = list; list = list->next;
8e87d142 2908
1da177e4 2909 skb->dev = (void *) hdev;
0d48d939 2910 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 2911 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
2912
2913 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2914
73d80deb 2915 __skb_queue_tail(queue, skb);
1da177e4
LT
2916 } while (list);
2917
af3e6359 2918 spin_unlock(&queue->lock);
1da177e4 2919 }
73d80deb
LAD
2920}
2921
/* Queue an ACL frame on @chan's data queue and schedule the TX work
 * item; actual transmission happens from the TX scheduler.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
2934
/* Send SCO data.
 *
 * Prepends a SCO header, queues the frame on the connection's data
 * queue and schedules the TX work item.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
2956
2957/* ---- HCI TX task (outgoing data) ---- */
2958
/* HCI Connection scheduler */

/* Pick the connection of @type with queued data that has the fewest
 * in-flight packets (fair scheduling), and compute its TX quota in
 * *quote from the controller's free buffer count. Returns NULL (and
 * *quote == 0) when no eligible connection exists.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Prefer the connection with the fewest packets in flight */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Available controller buffers for this link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Split the budget evenly; always grant at least one */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
3019
6039aa73 3020static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
3021{
3022 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 3023 struct hci_conn *c;
1da177e4 3024
bae1f5d9 3025 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 3026
bf4c6325
GP
3027 rcu_read_lock();
3028
1da177e4 3029 /* Kill stalled connections */
bf4c6325 3030 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 3031 if (c->type == type && c->sent) {
6ed93dc6
AE
3032 BT_ERR("%s killing stalled connection %pMR",
3033 hdev->name, &c->dst);
bed71748 3034 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
3035 }
3036 }
bf4c6325
GP
3037
3038 rcu_read_unlock();
1da177e4
LT
3039}
3040
/* Channel-level scheduler: among all connections of @type, pick the
 * channel whose head-of-queue skb has the highest priority, breaking
 * ties by fewest in-flight packets on the owning connection. Computes
 * the TX quota in *quote from the controller's free buffer count.
 * Returns NULL when nothing is eligible.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart fairness bookkeeping */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Available controller buffers for this link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
3122
/* Anti-starvation pass: after a scheduling round, promote the
 * head-of-queue skb of every channel that sent nothing (and reset the
 * sent counter of those that did) so low-priority traffic eventually
 * gets through.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channels that transmitted just get their round
			 * counter reset.
			 */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			/* Starved channel: bump to just below max priority */
			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
3172
/* Number of controller data blocks consumed by this ACL packet's
 * payload (block-based flow control), rounded up.
 */
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
3178
6039aa73 3179static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 3180{
1da177e4
LT
3181 if (!test_bit(HCI_RAW, &hdev->flags)) {
3182 /* ACL tx timeout must be longer than maximum
3183 * link supervision timeout (40.9 seconds) */
63d2bc1b 3184 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 3185 HCI_ACL_TX_TIMEOUT))
bae1f5d9 3186 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 3187 }
63d2bc1b 3188}
1da177e4 3189
6039aa73 3190static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
3191{
3192 unsigned int cnt = hdev->acl_cnt;
3193 struct hci_chan *chan;
3194 struct sk_buff *skb;
3195 int quote;
3196
3197 __check_timeout(hdev, cnt);
04837f64 3198
73d80deb 3199 while (hdev->acl_cnt &&
a8c5fb1a 3200 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
3201 u32 priority = (skb_peek(&chan->data_q))->priority;
3202 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 3203 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3204 skb->len, skb->priority);
73d80deb 3205
ec1cce24
LAD
3206 /* Stop if priority has changed */
3207 if (skb->priority < priority)
3208 break;
3209
3210 skb = skb_dequeue(&chan->data_q);
3211
73d80deb 3212 hci_conn_enter_active_mode(chan->conn,
04124681 3213 bt_cb(skb)->force_active);
04837f64 3214
1da177e4
LT
3215 hci_send_frame(skb);
3216 hdev->acl_last_tx = jiffies;
3217
3218 hdev->acl_cnt--;
73d80deb
LAD
3219 chan->sent++;
3220 chan->conn->sent++;
1da177e4
LT
3221 }
3222 }
02b20f0b
LAD
3223
3224 if (cnt != hdev->acl_cnt)
3225 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
3226}
3227
6039aa73 3228static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 3229{
63d2bc1b 3230 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
3231 struct hci_chan *chan;
3232 struct sk_buff *skb;
3233 int quote;
bd1eb66b 3234 u8 type;
b71d385a 3235
63d2bc1b 3236 __check_timeout(hdev, cnt);
b71d385a 3237
bd1eb66b
AE
3238 BT_DBG("%s", hdev->name);
3239
3240 if (hdev->dev_type == HCI_AMP)
3241 type = AMP_LINK;
3242 else
3243 type = ACL_LINK;
3244
b71d385a 3245 while (hdev->block_cnt > 0 &&
bd1eb66b 3246 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
3247 u32 priority = (skb_peek(&chan->data_q))->priority;
3248 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3249 int blocks;
3250
3251 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3252 skb->len, skb->priority);
b71d385a
AE
3253
3254 /* Stop if priority has changed */
3255 if (skb->priority < priority)
3256 break;
3257
3258 skb = skb_dequeue(&chan->data_q);
3259
3260 blocks = __get_blocks(hdev, skb);
3261 if (blocks > hdev->block_cnt)
3262 return;
3263
3264 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 3265 bt_cb(skb)->force_active);
b71d385a
AE
3266
3267 hci_send_frame(skb);
3268 hdev->acl_last_tx = jiffies;
3269
3270 hdev->block_cnt -= blocks;
3271 quote -= blocks;
3272
3273 chan->sent += blocks;
3274 chan->conn->sent += blocks;
3275 }
3276 }
3277
3278 if (cnt != hdev->block_cnt)
bd1eb66b 3279 hci_prio_recalculate(hdev, type);
b71d385a
AE
3280}
3281
6039aa73 3282static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
3283{
3284 BT_DBG("%s", hdev->name);
3285
bd1eb66b
AE
3286 /* No ACL link over BR/EDR controller */
3287 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3288 return;
3289
3290 /* No AMP link over AMP controller */
3291 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
3292 return;
3293
3294 switch (hdev->flow_ctl_mode) {
3295 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3296 hci_sched_acl_pkt(hdev);
3297 break;
3298
3299 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3300 hci_sched_acl_blk(hdev);
3301 break;
3302 }
3303}
3304
1da177e4 3305/* Schedule SCO */
6039aa73 3306static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
3307{
3308 struct hci_conn *conn;
3309 struct sk_buff *skb;
3310 int quote;
3311
3312 BT_DBG("%s", hdev->name);
3313
52087a79
LAD
3314 if (!hci_conn_num(hdev, SCO_LINK))
3315 return;
3316
1da177e4
LT
3317 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3318 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3319 BT_DBG("skb %p len %d", skb, skb->len);
3320 hci_send_frame(skb);
3321
3322 conn->sent++;
3323 if (conn->sent == ~0)
3324 conn->sent = 0;
3325 }
3326 }
3327}
3328
6039aa73 3329static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
3330{
3331 struct hci_conn *conn;
3332 struct sk_buff *skb;
3333 int quote;
3334
3335 BT_DBG("%s", hdev->name);
3336
52087a79
LAD
3337 if (!hci_conn_num(hdev, ESCO_LINK))
3338 return;
3339
8fc9ced3
GP
3340 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3341 &quote))) {
b6a0dc82
MH
3342 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3343 BT_DBG("skb %p len %d", skb, skb->len);
3344 hci_send_frame(skb);
3345
3346 conn->sent++;
3347 if (conn->sent == ~0)
3348 conn->sent = 0;
3349 }
3350 }
3351}
3352
6039aa73 3353static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 3354{
73d80deb 3355 struct hci_chan *chan;
6ed58ec5 3356 struct sk_buff *skb;
02b20f0b 3357 int quote, cnt, tmp;
6ed58ec5
VT
3358
3359 BT_DBG("%s", hdev->name);
3360
52087a79
LAD
3361 if (!hci_conn_num(hdev, LE_LINK))
3362 return;
3363
6ed58ec5
VT
3364 if (!test_bit(HCI_RAW, &hdev->flags)) {
3365 /* LE tx timeout must be longer than maximum
3366 * link supervision timeout (40.9 seconds) */
bae1f5d9 3367 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 3368 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 3369 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
3370 }
3371
3372 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 3373 tmp = cnt;
73d80deb 3374 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
3375 u32 priority = (skb_peek(&chan->data_q))->priority;
3376 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 3377 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3378 skb->len, skb->priority);
6ed58ec5 3379
ec1cce24
LAD
3380 /* Stop if priority has changed */
3381 if (skb->priority < priority)
3382 break;
3383
3384 skb = skb_dequeue(&chan->data_q);
3385
6ed58ec5
VT
3386 hci_send_frame(skb);
3387 hdev->le_last_tx = jiffies;
3388
3389 cnt--;
73d80deb
LAD
3390 chan->sent++;
3391 chan->conn->sent++;
6ed58ec5
VT
3392 }
3393 }
73d80deb 3394
6ed58ec5
VT
3395 if (hdev->le_pkts)
3396 hdev->le_cnt = cnt;
3397 else
3398 hdev->acl_cnt = cnt;
02b20f0b
LAD
3399
3400 if (cnt != tmp)
3401 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
3402}
3403
3eff45ea 3404static void hci_tx_work(struct work_struct *work)
1da177e4 3405{
3eff45ea 3406 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
3407 struct sk_buff *skb;
3408
6ed58ec5 3409 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 3410 hdev->sco_cnt, hdev->le_cnt);
1da177e4 3411
52de599e
MH
3412 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3413 /* Schedule queues and send stuff to HCI driver */
3414 hci_sched_acl(hdev);
3415 hci_sched_sco(hdev);
3416 hci_sched_esco(hdev);
3417 hci_sched_le(hdev);
3418 }
6ed58ec5 3419
1da177e4
LT
3420 /* Send next queued raw (unknown type) packet */
3421 while ((skb = skb_dequeue(&hdev->raw_q)))
3422 hci_send_frame(skb);
1da177e4
LT
3423}
3424
25985edc 3425/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
3426
3427/* ACL data packet */
6039aa73 3428static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
3429{
3430 struct hci_acl_hdr *hdr = (void *) skb->data;
3431 struct hci_conn *conn;
3432 __u16 handle, flags;
3433
3434 skb_pull(skb, HCI_ACL_HDR_SIZE);
3435
3436 handle = __le16_to_cpu(hdr->handle);
3437 flags = hci_flags(handle);
3438 handle = hci_handle(handle);
3439
f0e09510 3440 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 3441 handle, flags);
1da177e4
LT
3442
3443 hdev->stat.acl_rx++;
3444
3445 hci_dev_lock(hdev);
3446 conn = hci_conn_hash_lookup_handle(hdev, handle);
3447 hci_dev_unlock(hdev);
8e87d142 3448
1da177e4 3449 if (conn) {
65983fc7 3450 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 3451
1da177e4 3452 /* Send to upper protocol */
686ebf28
UF
3453 l2cap_recv_acldata(conn, skb, flags);
3454 return;
1da177e4 3455 } else {
8e87d142 3456 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 3457 hdev->name, handle);
1da177e4
LT
3458 }
3459
3460 kfree_skb(skb);
3461}
3462
3463/* SCO data packet */
6039aa73 3464static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
3465{
3466 struct hci_sco_hdr *hdr = (void *) skb->data;
3467 struct hci_conn *conn;
3468 __u16 handle;
3469
3470 skb_pull(skb, HCI_SCO_HDR_SIZE);
3471
3472 handle = __le16_to_cpu(hdr->handle);
3473
f0e09510 3474 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
3475
3476 hdev->stat.sco_rx++;
3477
3478 hci_dev_lock(hdev);
3479 conn = hci_conn_hash_lookup_handle(hdev, handle);
3480 hci_dev_unlock(hdev);
3481
3482 if (conn) {
1da177e4 3483 /* Send to upper protocol */
686ebf28
UF
3484 sco_recv_scodata(conn, skb);
3485 return;
1da177e4 3486 } else {
8e87d142 3487 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 3488 hdev->name, handle);
1da177e4
LT
3489 }
3490
3491 kfree_skb(skb);
3492}
3493
9238f36a
JH
3494static bool hci_req_is_complete(struct hci_dev *hdev)
3495{
3496 struct sk_buff *skb;
3497
3498 skb = skb_peek(&hdev->cmd_q);
3499 if (!skb)
3500 return true;
3501
3502 return bt_cb(skb)->req.start;
3503}
3504
42c6b129
JH
3505static void hci_resend_last(struct hci_dev *hdev)
3506{
3507 struct hci_command_hdr *sent;
3508 struct sk_buff *skb;
3509 u16 opcode;
3510
3511 if (!hdev->sent_cmd)
3512 return;
3513
3514 sent = (void *) hdev->sent_cmd->data;
3515 opcode = __le16_to_cpu(sent->opcode);
3516 if (opcode == HCI_OP_RESET)
3517 return;
3518
3519 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3520 if (!skb)
3521 return;
3522
3523 skb_queue_head(&hdev->cmd_q, skb);
3524 queue_work(hdev->workqueue, &hdev->cmd_work);
3525}
3526
/* Track completion of a command belonging to a request (a batch of
 * queued commands). When the last command of the request completes, or
 * any command in it fails (status != 0), the request's completion
 * callback is invoked with that status and any remaining commands of
 * the request are dropped from the queue.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* Hit the start of the next request: put it back and
		 * stop discarding.
		 */
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
3592
b78752cc 3593static void hci_rx_work(struct work_struct *work)
1da177e4 3594{
b78752cc 3595 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
3596 struct sk_buff *skb;
3597
3598 BT_DBG("%s", hdev->name);
3599
1da177e4 3600 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
3601 /* Send copy to monitor */
3602 hci_send_to_monitor(hdev, skb);
3603
1da177e4
LT
3604 if (atomic_read(&hdev->promisc)) {
3605 /* Send copy to the sockets */
470fe1b5 3606 hci_send_to_sock(hdev, skb);
1da177e4
LT
3607 }
3608
0736cfa8
MH
3609 if (test_bit(HCI_RAW, &hdev->flags) ||
3610 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1da177e4
LT
3611 kfree_skb(skb);
3612 continue;
3613 }
3614
3615 if (test_bit(HCI_INIT, &hdev->flags)) {
3616 /* Don't process data packets in this states. */
0d48d939 3617 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
3618 case HCI_ACLDATA_PKT:
3619 case HCI_SCODATA_PKT:
3620 kfree_skb(skb);
3621 continue;
3ff50b79 3622 }
1da177e4
LT
3623 }
3624
3625 /* Process frame */
0d48d939 3626 switch (bt_cb(skb)->pkt_type) {
1da177e4 3627 case HCI_EVENT_PKT:
b78752cc 3628 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
3629 hci_event_packet(hdev, skb);
3630 break;
3631
3632 case HCI_ACLDATA_PKT:
3633 BT_DBG("%s ACL data packet", hdev->name);
3634 hci_acldata_packet(hdev, skb);
3635 break;
3636
3637 case HCI_SCODATA_PKT:
3638 BT_DBG("%s SCO data packet", hdev->name);
3639 hci_scodata_packet(hdev, skb);
3640 break;
3641
3642 default:
3643 kfree_skb(skb);
3644 break;
3645 }
3646 }
1da177e4
LT
3647}
3648
c347b765 3649static void hci_cmd_work(struct work_struct *work)
1da177e4 3650{
c347b765 3651 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
3652 struct sk_buff *skb;
3653
2104786b
AE
3654 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3655 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 3656
1da177e4 3657 /* Send queued commands */
5a08ecce
AE
3658 if (atomic_read(&hdev->cmd_cnt)) {
3659 skb = skb_dequeue(&hdev->cmd_q);
3660 if (!skb)
3661 return;
3662
7585b97a 3663 kfree_skb(hdev->sent_cmd);
1da177e4 3664
a675d7f1 3665 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 3666 if (hdev->sent_cmd) {
1da177e4
LT
3667 atomic_dec(&hdev->cmd_cnt);
3668 hci_send_frame(skb);
7bdb8a5c
SJ
3669 if (test_bit(HCI_RESET, &hdev->flags))
3670 del_timer(&hdev->cmd_timer);
3671 else
3672 mod_timer(&hdev->cmd_timer,
5f246e89 3673 jiffies + HCI_CMD_TIMEOUT);
1da177e4
LT
3674 } else {
3675 skb_queue_head(&hdev->cmd_q, skb);
c347b765 3676 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
3677 }
3678 }
3679}
2519a1fc 3680
31f7956c
AG
3681u8 bdaddr_to_le(u8 bdaddr_type)
3682{
3683 switch (bdaddr_type) {
3684 case BDADDR_LE_PUBLIC:
3685 return ADDR_LE_DEV_PUBLIC;
3686
3687 default:
3688 /* Fallback to LE Random address type */
3689 return ADDR_LE_DEV_RANDOM;
3690 }
3691}
This page took 1.054704 seconds and 5 git commands to generate.