Bluetooth: Expose static address value for LE capable controllers
net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

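/* Note: the pattern above is the standard seq_file "single_open"
 * idiom: single_open() binds inquiry_cache_show() to the file and
 * seq_read() renders its output on every read. A minimal sketch of a
 * new read-only entry, assuming a field hdev->foo (hypothetical, for
 * illustration only):
 *
 *	static int foo_show(struct seq_file *f, void *p)
 *	{
 *		struct hci_dev *hdev = f->private;
 *
 *		seq_printf(f, "%u\n", hdev->foo);
 *		return 0;
 *	}
 */
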
static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

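/* DEFINE_SIMPLE_ATTRIBUTE() generates the auto_accept_delay_fops
 * structure (open/read/write/release) around the two callbacks above,
 * formatting the value with "%llu\n". Writes to the debugfs file land
 * in auto_accept_delay_set(), reads in auto_accept_delay_get().
 */
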
static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

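/* With debugfs mounted at the usual location, the static address can
 * then be read from userspace (path assumed; it depends on the mount
 * point and the controller index):
 *
 *	cat /sys/kernel/debug/bluetooth/hci0/static_address
 */
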
/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

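/* A minimal usage sketch (illustrative, not taken from this file): a
 * caller could issue a synchronous Read Local Version command and
 * inspect the returned event payload. The error handling follows the
 * ERR_PTR convention of __hci_cmd_sync():
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	// skb->data now holds struct hci_rp_read_local_version
 *	kfree_skb(skb);
 */
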
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

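/* The return values map to the Write Inquiry Mode command of the HCI
 * specification: 0x00 = standard inquiry results, 0x01 = inquiry
 * results with RSSI, 0x02 = extended inquiry results. The hardcoded
 * manufacturer/revision checks above whitelist controllers that are
 * known to report RSSI without advertising it in their feature bits.
 */
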
static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x01) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x02) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev))
		hci_set_le_support(req);

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (hdev->features[2][0] & 0x04)
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
	}

	if (lmp_ssp_capable(hdev))
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);

	if (lmp_le_capable(hdev))
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

	return 0;
}

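/* Initialization is staged: init1 resets the controller and reads its
 * basic identity (features, version, address), init2 configures the
 * BR/EDR and LE basics plus the event mask, init3 issues commands
 * that depend on the supported-commands bitmask read earlier, and
 * init4 handles the page-2 event mask and CSB parameters. Each stage
 * runs as one synchronous request, so later stages can rely on the
 * values earlier stages stored in hdev.
 */
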
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

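/* Every successful hci_dev_get() must be balanced by hci_dev_put()
 * once the caller is done with the device, as the ioctl helpers below
 * do. A minimal sketch:
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *
 *	if (hdev) {
 *		... use hdev ...
 *		hci_dev_put(hdev);
 *	}
 */
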
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

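/* The usual state flow is STOPPED -> STARTING -> FINDING ->
 * (optionally RESOLVING, while remote names are looked up) ->
 * STOPPING -> STOPPED. Management events are only emitted on the
 * transitions into FINDING and back into STOPPED, so userspace sees a
 * single "discovering" toggle per discovery cycle.
 */
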
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

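/* The resolve list is kept ordered so that entries with the strongest
 * signal (smallest |RSSI|, since RSSI values are negative dBm) come
 * first; name resolution is then attempted for nearby devices before
 * distant ones. Entries already in NAME_PENDING state keep their
 * position.
 */
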
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for a valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

1492
1493int hci_dev_reset(__u16 dev)
1494{
1495 struct hci_dev *hdev;
1496 int ret = 0;
1497
70f23020
AE
1498 hdev = hci_dev_get(dev);
1499 if (!hdev)
1da177e4
LT
1500 return -ENODEV;
1501
1502 hci_req_lock(hdev);
1da177e4 1503
808a049e
MH
1504 if (!test_bit(HCI_UP, &hdev->flags)) {
1505 ret = -ENETDOWN;
1da177e4 1506 goto done;
808a049e 1507 }
1da177e4 1508
0736cfa8
MH
1509 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1510 ret = -EBUSY;
1511 goto done;
1512 }
1513
1da177e4
LT
1514 /* Drop queues */
1515 skb_queue_purge(&hdev->rx_q);
1516 skb_queue_purge(&hdev->cmd_q);
1517
09fd0de5 1518 hci_dev_lock(hdev);
1f9b9a5d 1519 hci_inquiry_cache_flush(hdev);
1da177e4 1520 hci_conn_hash_flush(hdev);
09fd0de5 1521 hci_dev_unlock(hdev);
1da177e4
LT
1522
1523 if (hdev->flush)
1524 hdev->flush(hdev);
1525
8e87d142 1526 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 1527 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4
LT
1528
1529 if (!test_bit(HCI_RAW, &hdev->flags))
01178cd4 1530 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1da177e4
LT
1531
1532done:
1da177e4
LT
1533 hci_req_unlock(hdev);
1534 hci_dev_put(hdev);
1535 return ret;
1536}
1537
1538int hci_dev_reset_stat(__u16 dev)
1539{
1540 struct hci_dev *hdev;
1541 int ret = 0;
1542
70f23020
AE
1543 hdev = hci_dev_get(dev);
1544 if (!hdev)
1da177e4
LT
1545 return -ENODEV;
1546
0736cfa8
MH
1547 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1548 ret = -EBUSY;
1549 goto done;
1550 }
1551
1da177e4
LT
1552 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1553
0736cfa8 1554done:
1da177e4 1555 hci_dev_put(hdev);
1da177e4
LT
1556 return ret;
1557}
1558
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

1696
1697int hci_get_dev_info(void __user *arg)
1698{
1699 struct hci_dev *hdev;
1700 struct hci_dev_info di;
1701 int err = 0;
1702
1703 if (copy_from_user(&di, arg, sizeof(di)))
1704 return -EFAULT;
1705
70f23020
AE
1706 hdev = hci_dev_get(di.dev_id);
1707 if (!hdev)
1da177e4
LT
1708 return -ENODEV;
1709
a8b2d5c2 1710 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
3243553f 1711 cancel_delayed_work_sync(&hdev->power_off);
ab81cbf9 1712
a8b2d5c2
JH
1713 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1714 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 1715
1da177e4
LT
1716 strcpy(di.name, hdev->name);
1717 di.bdaddr = hdev->bdaddr;
60f2a3ed 1718 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
1da177e4
LT
1719 di.flags = hdev->flags;
1720 di.pkt_type = hdev->pkt_type;
572c7f84
JH
1721 if (lmp_bredr_capable(hdev)) {
1722 di.acl_mtu = hdev->acl_mtu;
1723 di.acl_pkts = hdev->acl_pkts;
1724 di.sco_mtu = hdev->sco_mtu;
1725 di.sco_pkts = hdev->sco_pkts;
1726 } else {
1727 di.acl_mtu = hdev->le_mtu;
1728 di.acl_pkts = hdev->le_pkts;
1729 di.sco_mtu = 0;
1730 di.sco_pkts = 0;
1731 }
1da177e4
LT
1732 di.link_policy = hdev->link_policy;
1733 di.link_mode = hdev->link_mode;
1734
1735 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1736 memcpy(&di.features, &hdev->features, sizeof(di.features));
1737
1738 if (copy_to_user(arg, &di, sizeof(di)))
1739 err = -EFAULT;
1740
1741 hci_dev_put(hdev);
1742
1743 return err;
1744}
1745
/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

1875
745c0ce3 1876static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 1877 u8 key_type, u8 old_key_type)
d25e28ab
JH
1878{
1879 /* Legacy key */
1880 if (key_type < 0x03)
745c0ce3 1881 return true;
d25e28ab
JH
1882
1883 /* Debug keys are insecure so don't store them persistently */
1884 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 1885 return false;
d25e28ab
JH
1886
1887 /* Changed combination key and there's no previous one */
1888 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 1889 return false;
d25e28ab
JH
1890
1891 /* Security mode 3 case */
1892 if (!conn)
745c0ce3 1893 return true;
d25e28ab
JH
1894
1895 /* Neither local nor remote side had no-bonding as requirement */
1896 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 1897 return true;
d25e28ab
JH
1898
1899 /* Local side had dedicated bonding as requirement */
1900 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 1901 return true;
d25e28ab
JH
1902
1903 /* Remote side had dedicated bonding as requirement */
1904 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 1905 return true;
d25e28ab
JH
1906
1907 /* If none of the above criteria match, then don't store the key
1908 * persistently */
745c0ce3 1909 return false;
d25e28ab
JH
1910}
1911
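/* In short: legacy (pre-SSP) keys are always stored and debug keys
 * never are. For SSP keys the decision follows the authentication
 * requirements exchanged during pairing: 0x00/0x01 mean no bonding,
 * 0x02/0x03 dedicated bonding, and higher values general bonding, so
 * a key is kept once at least one side asked for bonding. Callers use
 * the result to mark non-persistent keys for flushing via
 * conn->flush_key, as hci_add_link_key() below does.
 */
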
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

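/* Store a new SMP Short Term Key (STK) or Long Term Key (LTK),
 * reusing an existing entry for the same remote address if one
 * exists. Only proper LTKs, not STKs, are reported to the
 * management interface.
 */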
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}

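/* Completion callback for the BR/EDR inquiry that interleaved
 * discovery falls back to once LE scanning has been disabled.
 */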
static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

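/* A minimal sketch of how a transport driver is expected to combine
 * hci_alloc_dev() with the registration helpers below (the my_*
 * callbacks are hypothetical driver functions; the fields are real):
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus = HCI_USB;
 *	hdev->open = my_open;
 *	hdev->close = my_close;
 *	hdev->send = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */
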
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

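/* Reassemble one HCI packet of the given type from a raw byte stream.
 * A partially built sk_buff is parked in hdev->reassembly[index]
 * between calls and scb->expect counts the bytes still missing.
 * Returns how many input bytes were left unconsumed, or a negative
 * error.
 */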
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
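
/* UART-style transports that do not preserve packet boundaries are
 * expected to push every received chunk through the helper above,
 * e.g. from a line discipline receive callback (a sketch):
 *
 *	hci_recv_stream_fragment(hdev, data, count);
 *
 * The leading packet-type octet of each frame then drives the
 * reassembly of the bytes that follow.
 */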

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

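/* Typical use of the request API above, mirroring
 * le_scan_disable_work() (a sketch; my_complete just needs the
 * hci_req_complete_t signature):
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
 *	err = hci_req_run(&req, my_complete);
 *
 * The callback fires once, when the last command of the request
 * completes or any command in it fails.
 */
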
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;

	return skb;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

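/* For instance, a stand-alone controller reset (HCI_OP_RESET carries
 * no parameters) goes out as its own single-command request:
 *
 *	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 */
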
/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}

static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

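/* Pick the next channel to service: only channels whose head-of-queue
 * skb carries the highest priority currently waiting are considered,
 * and among those the one on the least-serviced connection wins. The
 * quote is that sender's fair share of the free controller buffers.
 */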
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

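/* Promote the head-of-queue skb of every channel that got nothing
 * sent in the last scheduling round, so that low-priority traffic
 * cannot be starved indefinitely by higher priorities.
 */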
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

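/* Called from the event path for Command Complete/Status events.
 * Determines whether the request that queued the matching command has
 * now finished (last command completed, or any command failed) and if
 * so runs the request's single completion callback.
 */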
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}

static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}