Bluetooth: Use IS_ERR_OR_NULL for checking bt_debugfs
net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR\n", &b->bdaddr);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open           = blacklist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

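/* The blacklist entry above and the read-only entries below all follow
 * the same pattern: a seq_file show callback that dumps state under
 * hci_dev_lock(), wired up through single_open(). With debugfs mounted
 * in the usual place, the files appear under
 * /sys/kernel/debug/bluetooth/hciX/ once the controller is set up.
 */
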
static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u32 data0, data5;
                u16 data1, data2, data3, data4;

                data5 = get_unaligned_le32(uuid);
                data4 = get_unaligned_le16(uuid + 4);
                data3 = get_unaligned_le16(uuid + 6);
                data2 = get_unaligned_le16(uuid + 8);
                data1 = get_unaligned_le16(uuid + 10);
                data0 = get_unaligned_le32(uuid + 12);

                seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.4x%.8x\n",
                           data0, data1, data2, data3, data4, data5);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open           = uuids_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open           = inquiry_cache_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open           = static_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

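/* Both helpers above complete a pending synchronous request: the waiter
 * in __hci_req_sync() or __hci_cmd_sync_ev() sleeps on req_wait_q until
 * req_status leaves HCI_REQ_PEND, then translates req_result into an
 * errno.
 */
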
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

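/* hci_get_cmd_complete() consumes hdev->recv_evt: on success ownership
 * of the skb passes to the caller, while every failure path frees it
 * and returns ERR_PTR(-ENODATA).
 */
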
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

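/* A minimal usage sketch (illustrative only, not part of this file): a
 * driver needing a synchronous HCI transaction during setup could do
 *
 *      struct sk_buff *skb;
 *
 *      skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *                           HCI_INIT_TIMEOUT);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *
 *      (skb->data now holds the command complete parameters)
 *      kfree_skb(skb);
 *
 * The returned skb is owned by the caller, who must free it with
 * kfree_skb() when done.
 */
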
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

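/* Stage 1 above only discovers basic controller capabilities. The
 * results (features, version, supported commands) steer what stages 2
 * to 4 send; __hci_init() further below runs the stages back to back.
 */
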
static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = __constant_cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

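/* Inquiry mode selection below: 0x02 requests Inquiry Result with RSSI
 * or Extended Inquiry Result events, 0x01 plain Inquiry Result with
 * RSSI, and 0x00 the standard Inquiry Result format. The hardcoded
 * manufacturer/revision checks whitelist controllers known to support
 * RSSI reporting without advertising it in their LMP features.
 */
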
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01; /* IO Capability Request */
                events[6] |= 0x02; /* IO Capability Response */
                events[6] |= 0x04; /* User Confirmation Request */
                events[6] |= 0x08; /* User Passkey Request */
                events[6] |= 0x10; /* Remote OOB Data Request */
                events[6] |= 0x20; /* Simple Pairing Complete */
                events[7] |= 0x04; /* User Passkey Notification */
                events[7] |= 0x08; /* Keypress Notification */
                events[7] |= 0x10; /* Remote Host Supported
                                    * Features Notification
                                    */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20; /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (hdev->features[2][0] & 0x01) {
                events[1] |= 0x40; /* Triggered Clock Capture */
                events[1] |= 0x80; /* Synchronization Train Complete */
                events[2] |= 0x10; /* Slave Page Response Timeout */
                events[2] |= 0x20; /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (hdev->features[2][0] & 0x02) {
                events[2] |= 0x01; /* Synchronization Train Received */
                events[2] |= 0x02; /* CSB Receive */
                events[2] |= 0x04; /* CSB Timeout */
                events[2] |= 0x08; /* Truncated Page Complete */
        }

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         */
        if (hdev->commands[6] & 0x80) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev))
                hci_set_le_support(req);

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Check for Synchronization Train support */
        if (hdev->features[2][0] & 0x04)
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
         * BR/EDR/LE type controllers. AMP controllers only need the
         * first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* Only create debugfs entries during the initial setup
         * phase and not every time the controller gets powered on.
         */
        if (!test_bit(HCI_SETUP, &hdev->dev_flags))
                return 0;

        debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
                            &blacklist_fops);

        debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

        if (lmp_bredr_capable(hdev)) {
                debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
                                    hdev, &inquiry_cache_fops);
                debugfs_create_file("voice_setting", 0444, hdev->debugfs,
                                    hdev, &voice_setting_fops);
        }

        if (lmp_ssp_capable(hdev))
                debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
                                    hdev, &auto_accept_delay_fops);

        if (lmp_le_capable(hdev))
                debugfs_create_file("static_address", 0444, hdev->debugfs,
                                    hdev, &static_address_fops);

        return 0;
}

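/* The debugfs_create_file() calls above are best-effort: their return
 * values are deliberately ignored, since the controller is fully
 * functional without the debug entries (for example when debugfs is
 * not enabled in the kernel configuration).
 */
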
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

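/* hci_inquiry_cache_update_resolve() keeps the resolve list sorted by
 * ascending abs(RSSI), i.e. strongest signal first, with entries whose
 * name resolution is already pending left at the front. Remote name
 * requests are therefore issued for the closest devices first.
 */
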
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr);

        if (ssp)
                *ssp = data->ssp_mode;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (ie->data.ssp_mode && ssp)
                        *ssp = true;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
        schedule();
        return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                err = -EBUSY;
                goto done;
        }

        if (hdev->dev_type != HCI_BREDR) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* for unlimited number of responses we will use buffer with
         * 255 entries
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate temp buffer and then
         * copy it to the user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

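/* For illustration (not part of this file), userspace reaches
 * hci_inquiry() through the HCIINQUIRY ioctl on a raw HCI socket,
 * roughly:
 *
 *      struct hci_inquiry_req *ir = buf;   (hypothetical setup)
 *
 *      ir->dev_id  = 0;
 *      ir->length  = 8;     (inquiry length, in 1.28 second units)
 *      ir->num_rsp = 255;
 *      ioctl(sk, HCIINQUIRY, ir);
 *
 * The inquiry results are copied back right after the request header.
 */
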
static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for valid public address or a configured static
                 * random address, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (hdev->dev_type == HCI_BREDR &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
                ret = hdev->setup(hdev);

        if (!ret) {
                if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                        set_bit(HCI_RAW, &hdev->flags);

                if (!test_bit(HCI_RAW, &hdev->flags) &&
                    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
                        ret = __hci_init(hdev);
        }

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
                    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
                    hdev->dev_type == HCI_BREDR) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        /* We need to ensure that no other power on/off work is pending
         * before proceeding to call hci_dev_do_open. This is
         * particularly important if the setup procedure has not yet
         * completed.
         */
        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work(&hdev->power_off);

        /* After this call it is guaranteed that the setup procedure
         * has finished. This means that error conditions like RFKILL
         * or no valid public or static random address apply.
         */
        flush_workqueue(hdev->req_workqueue);

        err = hci_dev_do_open(hdev);

        hci_dev_put(hdev);

        return err;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        cancel_delayed_work(&hdev->power_off);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                del_timer_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }

        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);

        if (hdev->discov_timeout > 0) {
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = 0;
                clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
                clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
        }

        if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
                cancel_delayed_work(&hdev->service_cache);

        cancel_delayed_work_sync(&hdev->le_scan_disable);

        hci_dev_lock(hdev);
        hci_inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags) &&
            !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
            test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* flush cmd work */
        flush_work(&hdev->cmd_work);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                del_timer_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        kfree_skb(hdev->recv_evt);
        hdev->recv_evt = NULL;

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        /* Clear flags */
        hdev->flags = 0;
        hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

        if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
                if (hdev->dev_type == HCI_BREDR) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 0);
                        hci_dev_unlock(hdev);
                }
        }

        /* Controller radio is available but is currently powered down */
        hdev->amp_status = AMP_STATUS_POWERED_DOWN;

        memset(hdev->eir, 0, sizeof(hdev->eir));
        memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                err = -EBUSY;
                goto done;
        }

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work(&hdev->power_off);

        err = hci_dev_do_close(hdev);

done:
        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);

        if (!test_bit(HCI_UP, &hdev->flags)) {
                ret = -ENETDOWN;
                goto done;
        }

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                ret = -EBUSY;
                goto done;
        }

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock(hdev);
        hci_inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                ret = -EBUSY;
                goto done;
        }

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                err = -EBUSY;
                goto done;
        }

        if (hdev->dev_type != HCI_BREDR) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        switch (cmd) {
        case HCISETAUTH:
                err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                           HCI_INIT_TIMEOUT);
                        if (err)
                                break;
                }

                err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETSCAN:
                err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKPOL:
                err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                  (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

done:
        hci_dev_put(hdev);
        return err;
}

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(hdev, &hci_dev_list, list) {
                if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                        cancel_delayed_work(&hdev->power_off);

                if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                        set_bit(HCI_PAIRABLE, &hdev->dev_flags);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work_sync(&hdev->power_off);

        if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                set_bit(HCI_PAIRABLE, &hdev->dev_flags);

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        if (lmp_bredr_capable(hdev)) {
                di.acl_mtu  = hdev->acl_mtu;
                di.acl_pkts = hdev->acl_pkts;
                di.sco_mtu  = hdev->sco_mtu;
                di.sco_pkts = hdev->sco_pkts;
        } else {
                di.acl_mtu  = hdev->le_mtu;
                di.acl_pkts = hdev->le_pkts;
                di.sco_mtu  = 0;
                di.sco_pkts = 0;
        }
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
                return -EBUSY;

        if (blocked) {
                set_bit(HCI_RFKILLED, &hdev->dev_flags);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags))
                        hci_dev_do_close(hdev);
        } else {
                clear_bit(HCI_RFKILLED, &hdev->dev_flags);
        }

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
        int err;

        BT_DBG("%s", hdev->name);

        err = hci_dev_do_open(hdev);
        if (err < 0) {
                mgmt_set_powered_failed(hdev, err);
                return;
        }

        /* During the HCI setup phase, a few error conditions are
         * ignored and they need to be checked now. If they are still
         * valid, it is important to turn the device back off.
         */
        if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
            (hdev->dev_type == HCI_BREDR &&
             !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
             !bacmp(&hdev->static_addr, BDADDR_ANY))) {
                clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
                hci_dev_do_close(hdev);
        } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
                queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
                                   HCI_AUTO_OFF_TIMEOUT);
        }

        if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
                mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            power_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
        struct hci_dev *hdev;

        hdev = container_of(work, struct hci_dev, discov_off.work);

        BT_DBG("%s", hdev->name);

        mgmt_discoverable_timeout(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
        struct bt_uuid *uuid, *tmp;

        list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
                list_del(&uuid->list);
                kfree(uuid);
        }

        return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key;

                key = list_entry(p, struct link_key, list);

                list_del(p);
                kfree(key);
        }

        return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                list_del(&k->list);
                kfree(k);
        }

        return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as a requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as a requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as a requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
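
/* Illustrative note (added commentary, not in the original file): for a
 * pairing where both sides used general bonding (auth_type and remote_auth
 * 0x04 or higher), an unauthenticated combination key passes the
 * "auth_type > 0x01 && conn->remote_auth > 0x01" check and is stored
 * persistently, while the same key type from a no-bonding pairing
 * (0x00/0x01 on both sides) falls through every check and hits the final
 * "return false".
 */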

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
		__le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
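
/* Descriptive note (added commentary, not in the original file): the timer
 * above is armed by hci_cmd_work() whenever a command is handed to the
 * driver and is normally deleted or re-armed when the controller responds.
 * If it fires, the controller swallowed a command, so cmd_cnt is forced
 * back to 1 to unblock the command queue and cmd_work is rescheduled.
 */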

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
					 bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
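
/* Illustrative sketch (not part of hci_core.c), assuming an invented
 * helper that blocks a given LE peer with a public address; callers of
 * hci_blacklist_add() are expected to hold the device lock, as mgmt does.
 */
static int example_block_peer(struct hci_dev *hdev, bdaddr_t *peer)
{
	int err;

	hci_dev_lock(hdev);
	err = hci_blacklist_add(hdev, peer, BDADDR_LE_PUBLIC);
	hci_dev_unlock(hdev);

	return err;
}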

static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
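
/* Illustrative driver-side sketch (not part of hci_core.c): roughly how a
 * hypothetical transport driver would use the lifecycle API above. The
 * foo_* names are invented for the example; open, close and send are the
 * only callbacks hci_register_dev() insists on.
 */
static int foo_open(struct hci_dev *hdev)
{
	return 0;	/* bring the transport up */
}

static int foo_close(struct hci_dev *hdev)
{
	return 0;	/* shut the transport down */
}

static int foo_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* a real driver would push the skb to its hardware here */
	kfree_skb(skb);
	return 0;
}

static int foo_probe_sketch(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_VIRTUAL;
	hdev->open = foo_open;
	hdev->close = foo_close;
	hdev->send = foo_send;

	err = hci_register_dev(hdev);
	if (err < 0)
		hci_free_dev(hdev);

	return err;
}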

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
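
/* Illustrative sketch (not part of hci_core.c): how a driver might hand a
 * complete event packet to the core. The helper name is invented; a real
 * driver fills the skb with whatever the hardware delivered and sets
 * pkt_type accordingly.
 */
static int example_deliver_event(struct hci_dev *hdev, const void *buf,
				 int len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), buf, len);
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;

	return hci_recv_frame(hdev, skb);
}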

static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
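
/* Illustrative sketch (not part of hci_core.c): a UART-style driver with
 * no framing of its own can feed raw bytes straight into the stream
 * reassembler above; foo_uart_rx() is an invented receive-path hook.
 */
static void foo_uart_rx(struct hci_dev *hdev, const u8 *buf, int count)
{
	int err;

	err = hci_recv_stream_fragment(hdev, (void *) buf, count);
	if (err < 0)
		BT_ERR("%s stream reassembly failed (%d)", hdev->name, err);
}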

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;

	return skb;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
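
/* Illustrative sketch (not part of hci_core.c): building and running a
 * one-command request with the API above. The callback and the choice of
 * HCI_OP_WRITE_SCAN_ENABLE with SCAN_PAGE are example values only.
 */
static void example_req_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s example request done, status 0x%2.2x", hdev->name, status);
}

static int example_send_request(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 scan = SCAN_PAGE;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	return hci_req_run(&req, example_req_complete);
}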

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}

static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
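
/* Worked example (added commentary, not in the original file): with three
 * ACL connections holding queued data and hdev->acl_cnt == 8, the loop
 * above picks the connection with the fewest in-flight packets and grants
 * it a quote of 8 / 3 = 2 packets, so equally busy links are served
 * round-robin.
 */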

static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
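
/* Worked example (added commentary, not in the original file): with a
 * controller block length of 339 octets, a 1021-octet ACL packet (4-octet
 * header plus 1017 octets of payload) costs DIV_ROUND_UP(1017, 339) = 3
 * data blocks against hdev->block_cnt.
 */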

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}

static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
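
/* Descriptive note (added commentary, not in the original file): cmd_cnt
 * acts as a credit counter. It is decremented above when a command goes to
 * the driver, replenished by the event handler from the credit the
 * controller reports in command complete/status events, and forced back to
 * 1 by hci_cmd_timeout() if the controller never answers.
 */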