Bluetooth: Add support for handling P-256 derived link keys
[deliverable/linux.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
8c520a59 28#include <linux/export.h>
3df92b31 29#include <linux/idr.h>
8c520a59 30#include <linux/rfkill.h>
baf27f6e 31#include <linux/debugfs.h>
47219839 32#include <asm/unaligned.h>
1da177e4
LT
33
34#include <net/bluetooth/bluetooth.h>
35#include <net/bluetooth/hci_core.h>
36
b78752cc 37static void hci_rx_work(struct work_struct *work);
c347b765 38static void hci_cmd_work(struct work_struct *work);
3eff45ea 39static void hci_tx_work(struct work_struct *work);
1da177e4 40
1da177e4
LT
41/* HCI device list */
42LIST_HEAD(hci_dev_list);
43DEFINE_RWLOCK(hci_dev_list_lock);
44
45/* HCI callback list */
46LIST_HEAD(hci_cb_list);
47DEFINE_RWLOCK(hci_cb_list_lock);
48
3df92b31
SL
49/* HCI ID Numbering */
50static DEFINE_IDA(hci_index_ida);
51
1da177e4
LT
52/* ---- HCI notifications ---- */
53
/* Notify interested parties (HCI sockets) of a device event such as
 * register/unregister or up/down.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
58
baf27f6e
MH
59/* ---- HCI debugfs entries ---- */
60
4b4148e9
MH
61static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
62 size_t count, loff_t *ppos)
63{
64 struct hci_dev *hdev = file->private_data;
65 char buf[3];
66
67 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
68 buf[1] = '\n';
69 buf[2] = '\0';
70 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
71}
72
/* debugfs write handler for "dut_mode": accepts a boolean string and
 * toggles Device Under Test mode via a synchronous HCI command.
 *
 * Enabling sends HCI_OP_ENABLE_DUT_MODE; disabling is done with a full
 * HCI_OP_RESET since there is no dedicated "disable DUT" command.
 */
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	/* Nothing to do if the requested state is already set */
	if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
		return -EALREADY;

	/* Serialize against other synchronous HCI requests */
	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* First byte of the command complete parameters is the status */
	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dev_flags);

	return count;
}
118
/* File operations for the read/write "dut_mode" debugfs entry. */
static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
125
dfb826a8
MH
126static int features_show(struct seq_file *f, void *ptr)
127{
128 struct hci_dev *hdev = f->private;
129 u8 p;
130
131 hci_dev_lock(hdev);
132 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
cfbb2b5b 133 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
dfb826a8
MH
134 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
135 hdev->features[p][0], hdev->features[p][1],
136 hdev->features[p][2], hdev->features[p][3],
137 hdev->features[p][4], hdev->features[p][5],
138 hdev->features[p][6], hdev->features[p][7]);
139 }
cfbb2b5b
MH
140 if (lmp_le_capable(hdev))
141 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
142 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
143 hdev->le_features[0], hdev->le_features[1],
144 hdev->le_features[2], hdev->le_features[3],
145 hdev->le_features[4], hdev->le_features[5],
146 hdev->le_features[6], hdev->le_features[7]);
dfb826a8
MH
147 hci_dev_unlock(hdev);
148
149 return 0;
150}
151
/* Open handler wiring the "features" debugfs entry to features_show(). */
static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
163
70afe0b8
MH
164static int blacklist_show(struct seq_file *f, void *p)
165{
166 struct hci_dev *hdev = f->private;
167 struct bdaddr_list *b;
168
169 hci_dev_lock(hdev);
170 list_for_each_entry(b, &hdev->blacklist, list)
b25f0785 171 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
70afe0b8
MH
172 hci_dev_unlock(hdev);
173
174 return 0;
175}
176
/* Open handler wiring the "blacklist" debugfs entry to blacklist_show(). */
static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
188
47219839
MH
189static int uuids_show(struct seq_file *f, void *p)
190{
191 struct hci_dev *hdev = f->private;
192 struct bt_uuid *uuid;
193
194 hci_dev_lock(hdev);
195 list_for_each_entry(uuid, &hdev->uuids, list) {
58f01aa9
MH
196 u8 i, val[16];
197
198 /* The Bluetooth UUID values are stored in big endian,
199 * but with reversed byte order. So convert them into
200 * the right order for the %pUb modifier.
201 */
202 for (i = 0; i < 16; i++)
203 val[i] = uuid->uuid[15 - i];
204
205 seq_printf(f, "%pUb\n", val);
47219839
MH
206 }
207 hci_dev_unlock(hdev);
208
209 return 0;
210}
211
/* Open handler wiring the "uuids" debugfs entry to uuids_show(). */
static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
223
baf27f6e
MH
/* seq_file show handler for "inquiry_cache": dumps every entry of the
 * discovery cache (address, scan parameters, class of device, clock
 * offset, RSSI, SSP mode and entry timestamp).
 */
static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}
247
/* Open handler wiring the "inquiry_cache" debugfs entry to its show fn. */
static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
259
02d08d15
MH
260static int link_keys_show(struct seq_file *f, void *ptr)
261{
262 struct hci_dev *hdev = f->private;
263 struct list_head *p, *n;
264
265 hci_dev_lock(hdev);
266 list_for_each_safe(p, n, &hdev->link_keys) {
267 struct link_key *key = list_entry(p, struct link_key, list);
268 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
269 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
270 }
271 hci_dev_unlock(hdev);
272
273 return 0;
274}
275
/* Open handler wiring the "link_keys" debugfs entry to link_keys_show(). */
static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
287
12c269d7
MH
/* debugfs read handler for "use_debug_keys": reports 'Y' or 'N' plus
 * newline depending on the HCI_DEBUG_KEYS flag. Read-only entry.
 */
static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations use_debug_keys_fops = {
	.open		= simple_open,
	.read		= use_debug_keys_read,
	.llseek		= default_llseek,
};
305
babdbb3c
MH
/* seq_file show handler for "dev_class": prints the 24-bit class of
 * device, most significant byte first.
 */
static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
329
041000b9
MH
/* Getter for the read-only "voice_setting" debugfs attribute. */
static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

/* No setter: voice_setting is controlled via HCI commands, not debugfs. */
DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");
343
ebd1e33b
MH
/* Setter for the "auto_accept_delay" debugfs attribute (no validation;
 * value is the delay used when auto-accepting connection requests).
 */
static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* Getter for the "auto_accept_delay" debugfs attribute. */
static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");
368
06f5b778
MH
/* Setter for the "ssp_debug_mode" debugfs attribute: issues the Write
 * Simple Pairing Debug Mode HCI command synchronously and caches the new
 * mode in hdev on success. Only 0 and 1 are valid values, and the device
 * must be up.
 */
static int ssp_debug_mode_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;
	struct sk_buff *skb;
	__u8 mode;
	int err;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize against other synchronous HCI requests */
	hci_req_lock(hdev);
	mode = val;
	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
			     &mode, HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* First byte of the command complete parameters is the status */
	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	hci_dev_lock(hdev);
	hdev->ssp_debug_mode = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* Getter for the "ssp_debug_mode" debugfs attribute. */
static int ssp_debug_mode_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->ssp_debug_mode;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
			ssp_debug_mode_set, "%llu\n");
417
2bfa3531
MH
/* Setter for the "idle_timeout" debugfs attribute: 0 disables the idle
 * timeout, otherwise the value must lie in 500..3600000 (milliseconds).
 */
static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* Getter for the "idle_timeout" debugfs attribute. */
static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");
445
/* Setter for "sniff_min_interval": must be non-zero, even, and no larger
 * than the configured sniff max interval.
 */
static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* Getter for the "sniff_min_interval" debugfs attribute. */
static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");
473
/* Setter for "sniff_max_interval": must be non-zero, even, and no smaller
 * than the configured sniff min interval.
 */
static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* Getter for the "sniff_max_interval" debugfs attribute. */
static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");
501
e7b8fc92
MH
/* seq_file show handler for "static_address": prints the LE static
 * random address configured for this controller.
 */
static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
524
92202185
MH
/* Setter for "own_address_type": 0 (public) or 1 (random) only. */
static int own_address_type_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && val != 1)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->own_addr_type = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* Getter for the "own_address_type" debugfs attribute. */
static int own_address_type_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->own_addr_type;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
			own_address_type_set, "%llu\n");
552
8f8625cd
MH
553static int long_term_keys_show(struct seq_file *f, void *ptr)
554{
555 struct hci_dev *hdev = f->private;
556 struct list_head *p, *n;
557
558 hci_dev_lock(hdev);
559 list_for_each_safe(p, n, &hdev->link_keys) {
560 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
561 seq_printf(f, "%pMR (type %u) %u %u %u %.4x %*phN %*phN\\n",
562 &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
563 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
564 8, ltk->rand, 16, ltk->val);
565 }
566 hci_dev_unlock(hdev);
567
568 return 0;
569}
570
/* Open handler wiring "long_term_keys" debugfs entry to its show fn. */
static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
582
4e70c7e7
MH
/* Setter for "conn_min_interval": LE connection interval minimum, in
 * units of 1.25 ms; spec range 0x0006..0x0c80 and must not exceed the
 * configured maximum.
 */
static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* Getter for the "conn_min_interval" debugfs attribute. */
static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");
610
/* Setter for "conn_max_interval": LE connection interval maximum, in
 * units of 1.25 ms; spec range 0x0006..0x0c80 and must not be below the
 * configured minimum.
 */
static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* Getter for the "conn_max_interval" debugfs attribute. */
static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");
638
89863109
JR
639static ssize_t lowpan_read(struct file *file, char __user *user_buf,
640 size_t count, loff_t *ppos)
641{
642 struct hci_dev *hdev = file->private_data;
643 char buf[3];
644
645 buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
646 buf[1] = '\n';
647 buf[2] = '\0';
648 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
649}
650
/* debugfs write handler for "6lowpan": accepts a boolean string and
 * toggles the HCI_6LOWPAN_ENABLED flag. Purely a flag flip; no HCI
 * traffic is generated, so the device does not need to be up.
 */
static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
			    size_t count, loff_t *position)
{
	struct hci_dev *hdev = fp->private_data;
	bool enable;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	if (strtobool(buf, &enable) < 0)
		return -EINVAL;

	/* Nothing to do if the requested state is already set */
	if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

	return count;
}
674
/* File operations for the read/write "6lowpan" debugfs entry. */
static const struct file_operations lowpan_debugfs_fops = {
	.open		= simple_open,
	.read		= lowpan_read,
	.write		= lowpan_write,
	.llseek		= default_llseek,
};
681
1da177e4
LT
682/* ---- HCI requests ---- */
683
42c6b129 684static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
1da177e4 685{
42c6b129 686 BT_DBG("%s result 0x%2.2x", hdev->name, result);
1da177e4
LT
687
688 if (hdev->req_status == HCI_REQ_PEND) {
689 hdev->req_result = result;
690 hdev->req_status = HCI_REQ_DONE;
691 wake_up_interruptible(&hdev->req_wait_q);
692 }
693}
694
695static void hci_req_cancel(struct hci_dev *hdev, int err)
696{
697 BT_DBG("%s err 0x%2.2x", hdev->name, err);
698
699 if (hdev->req_status == HCI_REQ_PEND) {
700 hdev->req_result = err;
701 hdev->req_status = HCI_REQ_CANCELED;
702 wake_up_interruptible(&hdev->req_wait_q);
703 }
704}
705
77a63e0a
FW
706static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
707 u8 event)
75e84b7c
JH
708{
709 struct hci_ev_cmd_complete *ev;
710 struct hci_event_hdr *hdr;
711 struct sk_buff *skb;
712
713 hci_dev_lock(hdev);
714
715 skb = hdev->recv_evt;
716 hdev->recv_evt = NULL;
717
718 hci_dev_unlock(hdev);
719
720 if (!skb)
721 return ERR_PTR(-ENODATA);
722
723 if (skb->len < sizeof(*hdr)) {
724 BT_ERR("Too short HCI event");
725 goto failed;
726 }
727
728 hdr = (void *) skb->data;
729 skb_pull(skb, HCI_EVENT_HDR_SIZE);
730
7b1abbbe
JH
731 if (event) {
732 if (hdr->evt != event)
733 goto failed;
734 return skb;
735 }
736
75e84b7c
JH
737 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
738 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
739 goto failed;
740 }
741
742 if (skb->len < sizeof(*ev)) {
743 BT_ERR("Too short cmd_complete event");
744 goto failed;
745 }
746
747 ev = (void *) skb->data;
748 skb_pull(skb, sizeof(*ev));
749
750 if (opcode == __le16_to_cpu(ev->opcode))
751 return skb;
752
753 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
754 __le16_to_cpu(ev->opcode));
755
756failed:
757 kfree_skb(skb);
758 return ERR_PTR(-ENODATA);
759}
760
/* Send a single HCI command and sleep until the matching completion
 * event arrives (or @timeout expires). When @event is non-zero the wait
 * is for that specific event instead of Command Complete.
 *
 * Returns the event skb (owned by the caller) or an ERR_PTR. Must be
 * called with hci_req_lock held by the caller.
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	/* Must be set before hci_req_run() so the completion callback
	 * cannot observe a stale status.
	 */
	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* Still HCI_REQ_PEND: the controller never answered */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
814
/* Convenience wrapper around __hci_cmd_sync_ev() for the common case of
 * waiting for a Command Complete event (event == 0).
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
821
/* Execute request and wait for completion. */
/* Build an HCI request via @func, run it, and sleep until the request
 * completes, is cancelled, or @timeout expires. Caller must hold
 * hci_req_lock (see hci_req_sync()).
 *
 * Returns 0 on success or a negative errno (-EINTR on signal,
 * -ETIMEDOUT if the controller never answered).
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	/* Must be set before hci_req_run() so the completion callback
	 * cannot observe a stale status.
	 */
	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* Still HCI_REQ_PEND: the controller never answered */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
885
/* Locked wrapper around __hci_req_sync(): serializes all synchronous
 * requests on this device and refuses to run when the device is down.
 */
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
903
/* Request builder that queues an HCI Reset and marks the device as
 * resetting so event processing can account for it.
 */
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}
912
/* Stage-1 init for BR/EDR controllers: packet-based flow control plus
 * the basic identity reads (features, version, BD address).
 */
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
926
/* Stage-1 init for AMP controllers: block-based flow control and the
 * AMP-specific capability reads.
 */
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
952
42c6b129 953static void hci_init1_req(struct hci_request *req, unsigned long opt)
e61ef499 954{
42c6b129 955 struct hci_dev *hdev = req->hdev;
e61ef499
AE
956
957 BT_DBG("%s %ld", hdev->name, opt);
958
11778716
AE
959 /* Reset */
960 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
42c6b129 961 hci_reset_req(req, 0);
11778716 962
e61ef499
AE
963 switch (hdev->dev_type) {
964 case HCI_BREDR:
42c6b129 965 bredr_init(req);
e61ef499
AE
966 break;
967
968 case HCI_AMP:
42c6b129 969 amp_init(req);
e61ef499
AE
970 break;
971
972 default:
973 BT_ERR("Unknown device type %d", hdev->dev_type);
974 break;
975 }
e61ef499
AE
976}
977
/* Stage-2 setup commands common to all BR/EDR capable controllers:
 * buffer sizes, identity reads, event filter reset, accept timeout and
 * (where supported) page scan parameter reads.
 */
static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
1019
/* Stage-2 setup commands for LE capable controllers: LE buffer/feature/
 * state reads, and implicit LE enable on single-mode controllers.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
1043
1044static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1045{
1046 if (lmp_ext_inq_capable(hdev))
1047 return 0x02;
1048
1049 if (lmp_inq_rssi_capable(hdev))
1050 return 0x01;
1051
1052 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1053 hdev->lmp_subver == 0x0757)
1054 return 0x01;
1055
1056 if (hdev->manufacturer == 15) {
1057 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1058 return 0x01;
1059 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1060 return 0x01;
1061 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1062 return 0x01;
1063 }
1064
1065 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1066 hdev->lmp_subver == 0x1805)
1067 return 0x01;
1068
1069 return 0x00;
1070}
1071
/* Queue a Write Inquiry Mode command using the best mode the controller
 * supports (see hci_get_inquiry_mode()).
 */
static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}
1080
/* Build and queue the Set Event Mask command (and LE event mask for LE
 * capable controllers), enabling only events the controller's feature
 * set can actually generate.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	/* The events[] buffer is reused for the LE event mask */
	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
1161
42c6b129 1162static void hci_init2_req(struct hci_request *req, unsigned long opt)
2177bab5 1163{
42c6b129
JH
1164 struct hci_dev *hdev = req->hdev;
1165
2177bab5 1166 if (lmp_bredr_capable(hdev))
42c6b129 1167 bredr_setup(req);
56f87901
JH
1168 else
1169 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2177bab5
JH
1170
1171 if (lmp_le_capable(hdev))
42c6b129 1172 le_setup(req);
2177bab5 1173
42c6b129 1174 hci_setup_event_mask(req);
2177bab5 1175
3f8e2d75
JH
1176 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1177 * local supported commands HCI command.
1178 */
1179 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
42c6b129 1180 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
2177bab5
JH
1181
1182 if (lmp_ssp_capable(hdev)) {
57af75a8
MH
1183 /* When SSP is available, then the host features page
1184 * should also be available as well. However some
1185 * controllers list the max_page as 0 as long as SSP
1186 * has not been enabled. To achieve proper debugging
1187 * output, force the minimum max_page to 1 at least.
1188 */
1189 hdev->max_page = 0x01;
1190
2177bab5
JH
1191 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1192 u8 mode = 0x01;
42c6b129
JH
1193 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1194 sizeof(mode), &mode);
2177bab5
JH
1195 } else {
1196 struct hci_cp_write_eir cp;
1197
1198 memset(hdev->eir, 0, sizeof(hdev->eir));
1199 memset(&cp, 0, sizeof(cp));
1200
42c6b129 1201 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
2177bab5
JH
1202 }
1203 }
1204
1205 if (lmp_inq_rssi_capable(hdev))
42c6b129 1206 hci_setup_inquiry_mode(req);
2177bab5
JH
1207
1208 if (lmp_inq_tx_pwr_capable(hdev))
42c6b129 1209 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
2177bab5
JH
1210
1211 if (lmp_ext_feat_capable(hdev)) {
1212 struct hci_cp_read_local_ext_features cp;
1213
1214 cp.page = 0x01;
42c6b129
JH
1215 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1216 sizeof(cp), &cp);
2177bab5
JH
1217 }
1218
1219 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1220 u8 enable = 1;
42c6b129
JH
1221 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1222 &enable);
2177bab5
JH
1223 }
1224}
1225
42c6b129 1226static void hci_setup_link_policy(struct hci_request *req)
2177bab5 1227{
42c6b129 1228 struct hci_dev *hdev = req->hdev;
2177bab5
JH
1229 struct hci_cp_write_def_link_policy cp;
1230 u16 link_policy = 0;
1231
1232 if (lmp_rswitch_capable(hdev))
1233 link_policy |= HCI_LP_RSWITCH;
1234 if (lmp_hold_capable(hdev))
1235 link_policy |= HCI_LP_HOLD;
1236 if (lmp_sniff_capable(hdev))
1237 link_policy |= HCI_LP_SNIFF;
1238 if (lmp_park_capable(hdev))
1239 link_policy |= HCI_LP_PARK;
1240
1241 cp.policy = cpu_to_le16(link_policy);
42c6b129 1242 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
2177bab5
JH
1243}
1244
42c6b129 1245static void hci_set_le_support(struct hci_request *req)
2177bab5 1246{
42c6b129 1247 struct hci_dev *hdev = req->hdev;
2177bab5
JH
1248 struct hci_cp_write_le_host_supported cp;
1249
c73eee91
JH
1250 /* LE-only devices do not support explicit enablement */
1251 if (!lmp_bredr_capable(hdev))
1252 return;
1253
2177bab5
JH
1254 memset(&cp, 0, sizeof(cp));
1255
1256 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1257 cp.le = 0x01;
1258 cp.simul = lmp_le_br_capable(hdev);
1259 }
1260
1261 if (cp.le != lmp_host_le_capable(hdev))
42c6b129
JH
1262 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1263 &cp);
2177bab5
JH
1264}
1265
d62e6d67
JH
1266static void hci_set_event_mask_page_2(struct hci_request *req)
1267{
1268 struct hci_dev *hdev = req->hdev;
1269 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1270
1271 /* If Connectionless Slave Broadcast master role is supported
1272 * enable all necessary events for it.
1273 */
53b834d2 1274 if (lmp_csb_master_capable(hdev)) {
d62e6d67
JH
1275 events[1] |= 0x40; /* Triggered Clock Capture */
1276 events[1] |= 0x80; /* Synchronization Train Complete */
1277 events[2] |= 0x10; /* Slave Page Response Timeout */
1278 events[2] |= 0x20; /* CSB Channel Map Change */
1279 }
1280
1281 /* If Connectionless Slave Broadcast slave role is supported
1282 * enable all necessary events for it.
1283 */
53b834d2 1284 if (lmp_csb_slave_capable(hdev)) {
d62e6d67
JH
1285 events[2] |= 0x01; /* Synchronization Train Received */
1286 events[2] |= 0x02; /* CSB Receive */
1287 events[2] |= 0x04; /* CSB Timeout */
1288 events[2] |= 0x08; /* Truncated Page Complete */
1289 }
1290
1291 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1292}
1293
42c6b129 1294static void hci_init3_req(struct hci_request *req, unsigned long opt)
2177bab5 1295{
42c6b129 1296 struct hci_dev *hdev = req->hdev;
d2c5d77f 1297 u8 p;
42c6b129 1298
b8f4e068
GP
1299 /* Some Broadcom based Bluetooth controllers do not support the
1300 * Delete Stored Link Key command. They are clearly indicating its
1301 * absence in the bit mask of supported commands.
1302 *
1303 * Check the supported commands and only if the the command is marked
1304 * as supported send it. If not supported assume that the controller
1305 * does not have actual support for stored link keys which makes this
1306 * command redundant anyway.
f9f462fa
MH
1307 *
1308 * Some controllers indicate that they support handling deleting
1309 * stored link keys, but they don't. The quirk lets a driver
1310 * just disable this command.
637b4cae 1311 */
f9f462fa
MH
1312 if (hdev->commands[6] & 0x80 &&
1313 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
59f45d57
JH
1314 struct hci_cp_delete_stored_link_key cp;
1315
1316 bacpy(&cp.bdaddr, BDADDR_ANY);
1317 cp.delete_all = 0x01;
1318 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1319 sizeof(cp), &cp);
1320 }
1321
2177bab5 1322 if (hdev->commands[5] & 0x10)
42c6b129 1323 hci_setup_link_policy(req);
2177bab5 1324
79830f66 1325 if (lmp_le_capable(hdev)) {
bef34c0a
MH
1326 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1327 /* If the controller has a public BD_ADDR, then
1328 * by default use that one. If this is a LE only
1329 * controller without a public address, default
1330 * to the random address.
1331 */
1332 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1333 hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
1334 else
1335 hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
1336 }
79830f66 1337
42c6b129 1338 hci_set_le_support(req);
79830f66 1339 }
d2c5d77f
JH
1340
1341 /* Read features beyond page 1 if available */
1342 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1343 struct hci_cp_read_local_ext_features cp;
1344
1345 cp.page = p;
1346 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1347 sizeof(cp), &cp);
1348 }
2177bab5
JH
1349}
1350
5d4e7e8d
JH
1351static void hci_init4_req(struct hci_request *req, unsigned long opt)
1352{
1353 struct hci_dev *hdev = req->hdev;
1354
d62e6d67
JH
1355 /* Set event mask page 2 if the HCI command for it is supported */
1356 if (hdev->commands[22] & 0x04)
1357 hci_set_event_mask_page_2(req);
1358
5d4e7e8d 1359 /* Check for Synchronization Train support */
53b834d2 1360 if (lmp_sync_train_capable(hdev))
5d4e7e8d
JH
1361 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1362}
1363
2177bab5
JH
1364static int __hci_init(struct hci_dev *hdev)
1365{
1366 int err;
1367
1368 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1369 if (err < 0)
1370 return err;
1371
4b4148e9
MH
1372 /* The Device Under Test (DUT) mode is special and available for
1373 * all controller types. So just create it early on.
1374 */
1375 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1376 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1377 &dut_mode_fops);
1378 }
1379
2177bab5
JH
1380 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1381 * BR/EDR/LE type controllers. AMP controllers only need the
1382 * first stage init.
1383 */
1384 if (hdev->dev_type != HCI_BREDR)
1385 return 0;
1386
1387 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1388 if (err < 0)
1389 return err;
1390
5d4e7e8d
JH
1391 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1392 if (err < 0)
1393 return err;
1394
baf27f6e
MH
1395 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1396 if (err < 0)
1397 return err;
1398
1399 /* Only create debugfs entries during the initial setup
1400 * phase and not every time the controller gets powered on.
1401 */
1402 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1403 return 0;
1404
dfb826a8
MH
1405 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1406 &features_fops);
ceeb3bc0
MH
1407 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1408 &hdev->manufacturer);
1409 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1410 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
70afe0b8
MH
1411 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1412 &blacklist_fops);
47219839
MH
1413 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1414
baf27f6e
MH
1415 if (lmp_bredr_capable(hdev)) {
1416 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1417 hdev, &inquiry_cache_fops);
02d08d15
MH
1418 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1419 hdev, &link_keys_fops);
12c269d7
MH
1420 debugfs_create_file("use_debug_keys", 0444, hdev->debugfs,
1421 hdev, &use_debug_keys_fops);
babdbb3c
MH
1422 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1423 hdev, &dev_class_fops);
041000b9
MH
1424 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1425 hdev, &voice_setting_fops);
baf27f6e
MH
1426 }
1427
06f5b778 1428 if (lmp_ssp_capable(hdev)) {
ebd1e33b
MH
1429 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1430 hdev, &auto_accept_delay_fops);
06f5b778
MH
1431 debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
1432 hdev, &ssp_debug_mode_fops);
1433 }
ebd1e33b 1434
2bfa3531
MH
1435 if (lmp_sniff_capable(hdev)) {
1436 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1437 hdev, &idle_timeout_fops);
1438 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1439 hdev, &sniff_min_interval_fops);
1440 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1441 hdev, &sniff_max_interval_fops);
1442 }
1443
d0f729b8
MH
1444 if (lmp_le_capable(hdev)) {
1445 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1446 &hdev->le_white_list_size);
e7b8fc92
MH
1447 debugfs_create_file("static_address", 0444, hdev->debugfs,
1448 hdev, &static_address_fops);
92202185
MH
1449 debugfs_create_file("own_address_type", 0644, hdev->debugfs,
1450 hdev, &own_address_type_fops);
8f8625cd
MH
1451 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1452 hdev, &long_term_keys_fops);
4e70c7e7
MH
1453 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1454 hdev, &conn_min_interval_fops);
1455 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1456 hdev, &conn_max_interval_fops);
89863109
JR
1457 debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
1458 &lowpan_debugfs_fops);
d0f729b8 1459 }
e7b8fc92 1460
baf27f6e 1461 return 0;
2177bab5
JH
1462}
1463
42c6b129 1464static void hci_scan_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1465{
1466 __u8 scan = opt;
1467
42c6b129 1468 BT_DBG("%s %x", req->hdev->name, scan);
1da177e4
LT
1469
1470 /* Inquiry and Page scans */
42c6b129 1471 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
1472}
1473
42c6b129 1474static void hci_auth_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1475{
1476 __u8 auth = opt;
1477
42c6b129 1478 BT_DBG("%s %x", req->hdev->name, auth);
1da177e4
LT
1479
1480 /* Authentication */
42c6b129 1481 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
1482}
1483
42c6b129 1484static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1485{
1486 __u8 encrypt = opt;
1487
42c6b129 1488 BT_DBG("%s %x", req->hdev->name, encrypt);
1da177e4 1489
e4e8e37c 1490 /* Encryption */
42c6b129 1491 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
1492}
1493
42c6b129 1494static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
e4e8e37c
MH
1495{
1496 __le16 policy = cpu_to_le16(opt);
1497
42c6b129 1498 BT_DBG("%s %x", req->hdev->name, policy);
e4e8e37c
MH
1499
1500 /* Default link policy */
42c6b129 1501 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
e4e8e37c
MH
1502}
1503
8e87d142 1504/* Get HCI device by index.
1da177e4
LT
1505 * Device is held on return. */
1506struct hci_dev *hci_dev_get(int index)
1507{
8035ded4 1508 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
1509
1510 BT_DBG("%d", index);
1511
1512 if (index < 0)
1513 return NULL;
1514
1515 read_lock(&hci_dev_list_lock);
8035ded4 1516 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
1517 if (d->id == index) {
1518 hdev = hci_dev_hold(d);
1519 break;
1520 }
1521 }
1522 read_unlock(&hci_dev_list_lock);
1523 return hdev;
1524}
1da177e4
LT
1525
1526/* ---- Inquiry support ---- */
ff9ef578 1527
30dc78e1
JH
1528bool hci_discovery_active(struct hci_dev *hdev)
1529{
1530 struct discovery_state *discov = &hdev->discovery;
1531
6fbe195d 1532 switch (discov->state) {
343f935b 1533 case DISCOVERY_FINDING:
6fbe195d 1534 case DISCOVERY_RESOLVING:
30dc78e1
JH
1535 return true;
1536
6fbe195d
AG
1537 default:
1538 return false;
1539 }
30dc78e1
JH
1540}
1541
ff9ef578
JH
1542void hci_discovery_set_state(struct hci_dev *hdev, int state)
1543{
1544 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1545
1546 if (hdev->discovery.state == state)
1547 return;
1548
1549 switch (state) {
1550 case DISCOVERY_STOPPED:
7b99b659
AG
1551 if (hdev->discovery.state != DISCOVERY_STARTING)
1552 mgmt_discovering(hdev, 0);
ff9ef578
JH
1553 break;
1554 case DISCOVERY_STARTING:
1555 break;
343f935b 1556 case DISCOVERY_FINDING:
ff9ef578
JH
1557 mgmt_discovering(hdev, 1);
1558 break;
30dc78e1
JH
1559 case DISCOVERY_RESOLVING:
1560 break;
ff9ef578
JH
1561 case DISCOVERY_STOPPING:
1562 break;
1563 }
1564
1565 hdev->discovery.state = state;
1566}
1567
1f9b9a5d 1568void hci_inquiry_cache_flush(struct hci_dev *hdev)
1da177e4 1569{
30883512 1570 struct discovery_state *cache = &hdev->discovery;
b57c1a56 1571 struct inquiry_entry *p, *n;
1da177e4 1572
561aafbc
JH
1573 list_for_each_entry_safe(p, n, &cache->all, all) {
1574 list_del(&p->all);
b57c1a56 1575 kfree(p);
1da177e4 1576 }
561aafbc
JH
1577
1578 INIT_LIST_HEAD(&cache->unknown);
1579 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
1580}
1581
a8c5fb1a
GP
1582struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1583 bdaddr_t *bdaddr)
1da177e4 1584{
30883512 1585 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
1586 struct inquiry_entry *e;
1587
6ed93dc6 1588 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 1589
561aafbc
JH
1590 list_for_each_entry(e, &cache->all, all) {
1591 if (!bacmp(&e->data.bdaddr, bdaddr))
1592 return e;
1593 }
1594
1595 return NULL;
1596}
1597
1598struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 1599 bdaddr_t *bdaddr)
561aafbc 1600{
30883512 1601 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
1602 struct inquiry_entry *e;
1603
6ed93dc6 1604 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
1605
1606 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 1607 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
1608 return e;
1609 }
1610
1611 return NULL;
1da177e4
LT
1612}
1613
30dc78e1 1614struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
1615 bdaddr_t *bdaddr,
1616 int state)
30dc78e1
JH
1617{
1618 struct discovery_state *cache = &hdev->discovery;
1619 struct inquiry_entry *e;
1620
6ed93dc6 1621 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
1622
1623 list_for_each_entry(e, &cache->resolve, list) {
1624 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1625 return e;
1626 if (!bacmp(&e->data.bdaddr, bdaddr))
1627 return e;
1628 }
1629
1630 return NULL;
1631}
1632
a3d4e20a 1633void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
04124681 1634 struct inquiry_entry *ie)
a3d4e20a
JH
1635{
1636 struct discovery_state *cache = &hdev->discovery;
1637 struct list_head *pos = &cache->resolve;
1638 struct inquiry_entry *p;
1639
1640 list_del(&ie->list);
1641
1642 list_for_each_entry(p, &cache->resolve, list) {
1643 if (p->name_state != NAME_PENDING &&
a8c5fb1a 1644 abs(p->data.rssi) >= abs(ie->data.rssi))
a3d4e20a
JH
1645 break;
1646 pos = &p->list;
1647 }
1648
1649 list_add(&ie->list, pos);
1650}
1651
3175405b 1652bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
04124681 1653 bool name_known, bool *ssp)
1da177e4 1654{
30883512 1655 struct discovery_state *cache = &hdev->discovery;
70f23020 1656 struct inquiry_entry *ie;
1da177e4 1657
6ed93dc6 1658 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1da177e4 1659
2b2fec4d
SJ
1660 hci_remove_remote_oob_data(hdev, &data->bdaddr);
1661
388fc8fa
JH
1662 if (ssp)
1663 *ssp = data->ssp_mode;
1664
70f23020 1665 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a 1666 if (ie) {
388fc8fa
JH
1667 if (ie->data.ssp_mode && ssp)
1668 *ssp = true;
1669
a3d4e20a 1670 if (ie->name_state == NAME_NEEDED &&
a8c5fb1a 1671 data->rssi != ie->data.rssi) {
a3d4e20a
JH
1672 ie->data.rssi = data->rssi;
1673 hci_inquiry_cache_update_resolve(hdev, ie);
1674 }
1675
561aafbc 1676 goto update;
a3d4e20a 1677 }
561aafbc
JH
1678
1679 /* Entry not in the cache. Add new one. */
1680 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
1681 if (!ie)
3175405b 1682 return false;
561aafbc
JH
1683
1684 list_add(&ie->all, &cache->all);
1685
1686 if (name_known) {
1687 ie->name_state = NAME_KNOWN;
1688 } else {
1689 ie->name_state = NAME_NOT_KNOWN;
1690 list_add(&ie->list, &cache->unknown);
1691 }
70f23020 1692
561aafbc
JH
1693update:
1694 if (name_known && ie->name_state != NAME_KNOWN &&
a8c5fb1a 1695 ie->name_state != NAME_PENDING) {
561aafbc
JH
1696 ie->name_state = NAME_KNOWN;
1697 list_del(&ie->list);
1da177e4
LT
1698 }
1699
70f23020
AE
1700 memcpy(&ie->data, data, sizeof(*data));
1701 ie->timestamp = jiffies;
1da177e4 1702 cache->timestamp = jiffies;
3175405b
JH
1703
1704 if (ie->name_state == NAME_NOT_KNOWN)
1705 return false;
1706
1707 return true;
1da177e4
LT
1708}
1709
1710static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1711{
30883512 1712 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
1713 struct inquiry_info *info = (struct inquiry_info *) buf;
1714 struct inquiry_entry *e;
1715 int copied = 0;
1716
561aafbc 1717 list_for_each_entry(e, &cache->all, all) {
1da177e4 1718 struct inquiry_data *data = &e->data;
b57c1a56
JH
1719
1720 if (copied >= num)
1721 break;
1722
1da177e4
LT
1723 bacpy(&info->bdaddr, &data->bdaddr);
1724 info->pscan_rep_mode = data->pscan_rep_mode;
1725 info->pscan_period_mode = data->pscan_period_mode;
1726 info->pscan_mode = data->pscan_mode;
1727 memcpy(info->dev_class, data->dev_class, 3);
1728 info->clock_offset = data->clock_offset;
b57c1a56 1729
1da177e4 1730 info++;
b57c1a56 1731 copied++;
1da177e4
LT
1732 }
1733
1734 BT_DBG("cache %p, copied %d", cache, copied);
1735 return copied;
1736}
1737
42c6b129 1738static void hci_inq_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1739{
1740 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
42c6b129 1741 struct hci_dev *hdev = req->hdev;
1da177e4
LT
1742 struct hci_cp_inquiry cp;
1743
1744 BT_DBG("%s", hdev->name);
1745
1746 if (test_bit(HCI_INQUIRY, &hdev->flags))
1747 return;
1748
1749 /* Start Inquiry */
1750 memcpy(&cp.lap, &ir->lap, 3);
1751 cp.length = ir->length;
1752 cp.num_rsp = ir->num_rsp;
42c6b129 1753 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
1754}
1755
3e13fa1e
AG
1756static int wait_inquiry(void *word)
1757{
1758 schedule();
1759 return signal_pending(current);
1760}
1761
1da177e4
LT
1762int hci_inquiry(void __user *arg)
1763{
1764 __u8 __user *ptr = arg;
1765 struct hci_inquiry_req ir;
1766 struct hci_dev *hdev;
1767 int err = 0, do_inquiry = 0, max_rsp;
1768 long timeo;
1769 __u8 *buf;
1770
1771 if (copy_from_user(&ir, ptr, sizeof(ir)))
1772 return -EFAULT;
1773
5a08ecce
AE
1774 hdev = hci_dev_get(ir.dev_id);
1775 if (!hdev)
1da177e4
LT
1776 return -ENODEV;
1777
0736cfa8
MH
1778 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1779 err = -EBUSY;
1780 goto done;
1781 }
1782
5b69bef5
MH
1783 if (hdev->dev_type != HCI_BREDR) {
1784 err = -EOPNOTSUPP;
1785 goto done;
1786 }
1787
56f87901
JH
1788 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1789 err = -EOPNOTSUPP;
1790 goto done;
1791 }
1792
09fd0de5 1793 hci_dev_lock(hdev);
8e87d142 1794 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 1795 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 1796 hci_inquiry_cache_flush(hdev);
1da177e4
LT
1797 do_inquiry = 1;
1798 }
09fd0de5 1799 hci_dev_unlock(hdev);
1da177e4 1800
04837f64 1801 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
1802
1803 if (do_inquiry) {
01178cd4
JH
1804 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1805 timeo);
70f23020
AE
1806 if (err < 0)
1807 goto done;
3e13fa1e
AG
1808
1809 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1810 * cleared). If it is interrupted by a signal, return -EINTR.
1811 */
1812 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1813 TASK_INTERRUPTIBLE))
1814 return -EINTR;
70f23020 1815 }
1da177e4 1816
8fc9ced3
GP
1817 /* for unlimited number of responses we will use buffer with
1818 * 255 entries
1819 */
1da177e4
LT
1820 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1821
1822 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1823 * copy it to the user space.
1824 */
01df8c31 1825 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 1826 if (!buf) {
1da177e4
LT
1827 err = -ENOMEM;
1828 goto done;
1829 }
1830
09fd0de5 1831 hci_dev_lock(hdev);
1da177e4 1832 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 1833 hci_dev_unlock(hdev);
1da177e4
LT
1834
1835 BT_DBG("num_rsp %d", ir.num_rsp);
1836
1837 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1838 ptr += sizeof(ir);
1839 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 1840 ir.num_rsp))
1da177e4 1841 err = -EFAULT;
8e87d142 1842 } else
1da177e4
LT
1843 err = -EFAULT;
1844
1845 kfree(buf);
1846
1847done:
1848 hci_dev_put(hdev);
1849 return err;
1850}
1851
cbed0ca1 1852static int hci_dev_do_open(struct hci_dev *hdev)
1da177e4 1853{
1da177e4
LT
1854 int ret = 0;
1855
1da177e4
LT
1856 BT_DBG("%s %p", hdev->name, hdev);
1857
1858 hci_req_lock(hdev);
1859
94324962
JH
1860 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1861 ret = -ENODEV;
1862 goto done;
1863 }
1864
a5c8f270
MH
1865 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
1866 /* Check for rfkill but allow the HCI setup stage to
1867 * proceed (which in itself doesn't cause any RF activity).
1868 */
1869 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1870 ret = -ERFKILL;
1871 goto done;
1872 }
1873
1874 /* Check for valid public address or a configured static
1875 * random adddress, but let the HCI setup proceed to
1876 * be able to determine if there is a public address
1877 * or not.
1878 *
1879 * This check is only valid for BR/EDR controllers
1880 * since AMP controllers do not have an address.
1881 */
1882 if (hdev->dev_type == HCI_BREDR &&
1883 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1884 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1885 ret = -EADDRNOTAVAIL;
1886 goto done;
1887 }
611b30f7
MH
1888 }
1889
1da177e4
LT
1890 if (test_bit(HCI_UP, &hdev->flags)) {
1891 ret = -EALREADY;
1892 goto done;
1893 }
1894
1da177e4
LT
1895 if (hdev->open(hdev)) {
1896 ret = -EIO;
1897 goto done;
1898 }
1899
f41c70c4
MH
1900 atomic_set(&hdev->cmd_cnt, 1);
1901 set_bit(HCI_INIT, &hdev->flags);
1902
1903 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1904 ret = hdev->setup(hdev);
1905
1906 if (!ret) {
f41c70c4
MH
1907 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1908 set_bit(HCI_RAW, &hdev->flags);
1909
0736cfa8
MH
1910 if (!test_bit(HCI_RAW, &hdev->flags) &&
1911 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
f41c70c4 1912 ret = __hci_init(hdev);
1da177e4
LT
1913 }
1914
f41c70c4
MH
1915 clear_bit(HCI_INIT, &hdev->flags);
1916
1da177e4
LT
1917 if (!ret) {
1918 hci_dev_hold(hdev);
1919 set_bit(HCI_UP, &hdev->flags);
1920 hci_notify(hdev, HCI_DEV_UP);
bb4b2a9a 1921 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
0736cfa8 1922 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1514b892 1923 hdev->dev_type == HCI_BREDR) {
09fd0de5 1924 hci_dev_lock(hdev);
744cf19e 1925 mgmt_powered(hdev, 1);
09fd0de5 1926 hci_dev_unlock(hdev);
56e5cb86 1927 }
8e87d142 1928 } else {
1da177e4 1929 /* Init failed, cleanup */
3eff45ea 1930 flush_work(&hdev->tx_work);
c347b765 1931 flush_work(&hdev->cmd_work);
b78752cc 1932 flush_work(&hdev->rx_work);
1da177e4
LT
1933
1934 skb_queue_purge(&hdev->cmd_q);
1935 skb_queue_purge(&hdev->rx_q);
1936
1937 if (hdev->flush)
1938 hdev->flush(hdev);
1939
1940 if (hdev->sent_cmd) {
1941 kfree_skb(hdev->sent_cmd);
1942 hdev->sent_cmd = NULL;
1943 }
1944
1945 hdev->close(hdev);
1946 hdev->flags = 0;
1947 }
1948
1949done:
1950 hci_req_unlock(hdev);
1da177e4
LT
1951 return ret;
1952}
1953
cbed0ca1
JH
1954/* ---- HCI ioctl helpers ---- */
1955
1956int hci_dev_open(__u16 dev)
1957{
1958 struct hci_dev *hdev;
1959 int err;
1960
1961 hdev = hci_dev_get(dev);
1962 if (!hdev)
1963 return -ENODEV;
1964
e1d08f40
JH
1965 /* We need to ensure that no other power on/off work is pending
1966 * before proceeding to call hci_dev_do_open. This is
1967 * particularly important if the setup procedure has not yet
1968 * completed.
1969 */
1970 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1971 cancel_delayed_work(&hdev->power_off);
1972
a5c8f270
MH
1973 /* After this call it is guaranteed that the setup procedure
1974 * has finished. This means that error conditions like RFKILL
1975 * or no valid public or static random address apply.
1976 */
e1d08f40
JH
1977 flush_workqueue(hdev->req_workqueue);
1978
cbed0ca1
JH
1979 err = hci_dev_do_open(hdev);
1980
1981 hci_dev_put(hdev);
1982
1983 return err;
1984}
1985
1da177e4
LT
1986static int hci_dev_do_close(struct hci_dev *hdev)
1987{
1988 BT_DBG("%s %p", hdev->name, hdev);
1989
78c04c0b
VCG
1990 cancel_delayed_work(&hdev->power_off);
1991
1da177e4
LT
1992 hci_req_cancel(hdev, ENODEV);
1993 hci_req_lock(hdev);
1994
1995 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
b79f44c1 1996 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
1997 hci_req_unlock(hdev);
1998 return 0;
1999 }
2000
3eff45ea
GP
2001 /* Flush RX and TX works */
2002 flush_work(&hdev->tx_work);
b78752cc 2003 flush_work(&hdev->rx_work);
1da177e4 2004
16ab91ab 2005 if (hdev->discov_timeout > 0) {
e0f9309f 2006 cancel_delayed_work(&hdev->discov_off);
16ab91ab 2007 hdev->discov_timeout = 0;
5e5282bb 2008 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
310a3d48 2009 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
16ab91ab
JH
2010 }
2011
a8b2d5c2 2012 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
7d78525d
JH
2013 cancel_delayed_work(&hdev->service_cache);
2014
7ba8b4be
AG
2015 cancel_delayed_work_sync(&hdev->le_scan_disable);
2016
09fd0de5 2017 hci_dev_lock(hdev);
1f9b9a5d 2018 hci_inquiry_cache_flush(hdev);
1da177e4 2019 hci_conn_hash_flush(hdev);
09fd0de5 2020 hci_dev_unlock(hdev);
1da177e4
LT
2021
2022 hci_notify(hdev, HCI_DEV_DOWN);
2023
2024 if (hdev->flush)
2025 hdev->flush(hdev);
2026
2027 /* Reset device */
2028 skb_queue_purge(&hdev->cmd_q);
2029 atomic_set(&hdev->cmd_cnt, 1);
8af59467 2030 if (!test_bit(HCI_RAW, &hdev->flags) &&
3a6afbd2 2031 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
a6c511c6 2032 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1da177e4 2033 set_bit(HCI_INIT, &hdev->flags);
01178cd4 2034 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1da177e4
LT
2035 clear_bit(HCI_INIT, &hdev->flags);
2036 }
2037
c347b765
GP
2038 /* flush cmd work */
2039 flush_work(&hdev->cmd_work);
1da177e4
LT
2040
2041 /* Drop queues */
2042 skb_queue_purge(&hdev->rx_q);
2043 skb_queue_purge(&hdev->cmd_q);
2044 skb_queue_purge(&hdev->raw_q);
2045
2046 /* Drop last sent command */
2047 if (hdev->sent_cmd) {
b79f44c1 2048 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
2049 kfree_skb(hdev->sent_cmd);
2050 hdev->sent_cmd = NULL;
2051 }
2052
b6ddb638
JH
2053 kfree_skb(hdev->recv_evt);
2054 hdev->recv_evt = NULL;
2055
1da177e4
LT
2056 /* After this point our queues are empty
2057 * and no tasks are scheduled. */
2058 hdev->close(hdev);
2059
35b973c9
JH
2060 /* Clear flags */
2061 hdev->flags = 0;
2062 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2063
93c311a0
MH
2064 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2065 if (hdev->dev_type == HCI_BREDR) {
2066 hci_dev_lock(hdev);
2067 mgmt_powered(hdev, 0);
2068 hci_dev_unlock(hdev);
2069 }
8ee56540 2070 }
5add6af8 2071
ced5c338 2072 /* Controller radio is available but is currently powered down */
536619e8 2073 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
ced5c338 2074
e59fda8d 2075 memset(hdev->eir, 0, sizeof(hdev->eir));
09b3c3fb 2076 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
e59fda8d 2077
1da177e4
LT
2078 hci_req_unlock(hdev);
2079
2080 hci_dev_put(hdev);
2081 return 0;
2082}
2083
2084int hci_dev_close(__u16 dev)
2085{
2086 struct hci_dev *hdev;
2087 int err;
2088
70f23020
AE
2089 hdev = hci_dev_get(dev);
2090 if (!hdev)
1da177e4 2091 return -ENODEV;
8ee56540 2092
0736cfa8
MH
2093 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2094 err = -EBUSY;
2095 goto done;
2096 }
2097
8ee56540
MH
2098 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2099 cancel_delayed_work(&hdev->power_off);
2100
1da177e4 2101 err = hci_dev_do_close(hdev);
8ee56540 2102
0736cfa8 2103done:
1da177e4
LT
2104 hci_dev_put(hdev);
2105 return err;
2106}
2107
/* Reset the controller identified by @dev: drop all queued traffic and
 * cached state, then (unless HCI_RAW) issue an HCI Reset to the hardware.
 * Returns 0 on success or a negative errno.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Serialize against other synchronous requests on this device */
	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	/* User-channel owners get exclusive control of the device */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Flush cached inquiry results and connection state under hdev lock */
	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	/* Give the driver a chance to flush its own transport queues */
	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset flow control: one command credit, no outstanding data */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
2152
2153int hci_dev_reset_stat(__u16 dev)
2154{
2155 struct hci_dev *hdev;
2156 int ret = 0;
2157
70f23020
AE
2158 hdev = hci_dev_get(dev);
2159 if (!hdev)
1da177e4
LT
2160 return -ENODEV;
2161
0736cfa8
MH
2162 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2163 ret = -EBUSY;
2164 goto done;
2165 }
2166
1da177e4
LT
2167 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2168
0736cfa8 2169done:
1da177e4 2170 hci_dev_put(hdev);
1da177e4
LT
2171 return ret;
2172}
2173
/* Handle the legacy HCISET* ioctls: copy a struct hci_dev_req from user
 * space and apply the requested setting to the target controller. Only
 * valid for powered BR/EDR controllers not bound to a user channel.
 * Returns 0 or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	/* These ioctls only make sense for BR/EDR capable controllers */
	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		/* Host-only setting; no HCI command needed */
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs MTU in the high half, packet count in the low */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
2264
/* HCIGETDEVLIST ioctl: copy up to the caller-requested number of
 * (dev_id, flags) pairs for registered controllers to user space.
 * Returns 0 or a negative errno.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the allocation driven by the user-supplied count */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Enumeration implies legacy (raw) usage: disarm auto-off */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Legacy API users expect the device to be pairable */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy back the entries actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
2311
/* HCIGETDEVINFO ioctl: fill a struct hci_dev_info for the controller
 * whose index the caller supplied and copy it back to user space.
 * Returns 0 or a negative errno.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Querying via the legacy API disarms the pending auto power-off */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Legacy API users expect the device to be pairable */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: transport bus; next two bits: device type */
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		/* LE-only controller: report LE buffers in the ACL fields */
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
2360
2361/* ---- Interface to HCI drivers ---- */
2362
611b30f7
MH
2363static int hci_rfkill_set_block(void *data, bool blocked)
2364{
2365 struct hci_dev *hdev = data;
2366
2367 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2368
0736cfa8
MH
2369 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2370 return -EBUSY;
2371
5e130367
JH
2372 if (blocked) {
2373 set_bit(HCI_RFKILLED, &hdev->dev_flags);
bf543036
JH
2374 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2375 hci_dev_do_close(hdev);
5e130367
JH
2376 } else {
2377 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
1025c04c 2378 }
611b30f7
MH
2379
2380 return 0;
2381}
2382
/* rfkill operations for HCI controllers; only blocking is handled */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
2386
/* Deferred power-on handler: open the device and re-validate conditions
 * (rfkill, missing address) that were deliberately ignored during setup.
 * Schedules the auto power-off timer and announces the index to mgmt
 * once setup completes.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		/* No usable address or radio blocked: undo the power-on */
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		/* Power back off unless something claims the device in time */
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
2418
2419static void hci_power_off(struct work_struct *work)
2420{
3243553f 2421 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 2422 power_off.work);
ab81cbf9
JH
2423
2424 BT_DBG("%s", hdev->name);
2425
8ee56540 2426 hci_dev_do_close(hdev);
ab81cbf9
JH
2427}
2428
16ab91ab
JH
2429static void hci_discov_off(struct work_struct *work)
2430{
2431 struct hci_dev *hdev;
16ab91ab
JH
2432
2433 hdev = container_of(work, struct hci_dev, discov_off.work);
2434
2435 BT_DBG("%s", hdev->name);
2436
d1967ff8 2437 mgmt_discoverable_timeout(hdev);
16ab91ab
JH
2438}
2439
2aeb9a1a
JH
2440int hci_uuids_clear(struct hci_dev *hdev)
2441{
4821002c 2442 struct bt_uuid *uuid, *tmp;
2aeb9a1a 2443
4821002c
JH
2444 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2445 list_del(&uuid->list);
2aeb9a1a
JH
2446 kfree(uuid);
2447 }
2448
2449 return 0;
2450}
2451
55ed8ca1
JH
2452int hci_link_keys_clear(struct hci_dev *hdev)
2453{
2454 struct list_head *p, *n;
2455
2456 list_for_each_safe(p, n, &hdev->link_keys) {
2457 struct link_key *key;
2458
2459 key = list_entry(p, struct link_key, list);
2460
2461 list_del(p);
2462 kfree(key);
2463 }
2464
2465 return 0;
2466}
2467
b899efaf
VCG
2468int hci_smp_ltks_clear(struct hci_dev *hdev)
2469{
2470 struct smp_ltk *k, *tmp;
2471
2472 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2473 list_del(&k->list);
2474 kfree(k);
2475 }
2476
2477 return 0;
2478}
2479
55ed8ca1
JH
2480struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2481{
8035ded4 2482 struct link_key *k;
55ed8ca1 2483
8035ded4 2484 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
2485 if (bacmp(bdaddr, &k->bdaddr) == 0)
2486 return k;
55ed8ca1
JH
2487
2488 return NULL;
2489}
2490
745c0ce3 2491static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 2492 u8 key_type, u8 old_key_type)
d25e28ab
JH
2493{
2494 /* Legacy key */
2495 if (key_type < 0x03)
745c0ce3 2496 return true;
d25e28ab
JH
2497
2498 /* Debug keys are insecure so don't store them persistently */
2499 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 2500 return false;
d25e28ab
JH
2501
2502 /* Changed combination key and there's no previous one */
2503 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 2504 return false;
d25e28ab
JH
2505
2506 /* Security mode 3 case */
2507 if (!conn)
745c0ce3 2508 return true;
d25e28ab
JH
2509
2510 /* Neither local nor remote side had no-bonding as requirement */
2511 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 2512 return true;
d25e28ab
JH
2513
2514 /* Local side had dedicated bonding as requirement */
2515 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 2516 return true;
d25e28ab
JH
2517
2518 /* Remote side had dedicated bonding as requirement */
2519 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 2520 return true;
d25e28ab
JH
2521
2522 /* If none of the above criteria match, then don't store the key
2523 * persistently */
745c0ce3 2524 return false;
d25e28ab
JH
2525}
2526
c9839a11 2527struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
75d262c2 2528{
c9839a11 2529 struct smp_ltk *k;
75d262c2 2530
c9839a11
VCG
2531 list_for_each_entry(k, &hdev->long_term_keys, list) {
2532 if (k->ediv != ediv ||
a8c5fb1a 2533 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
2534 continue;
2535
c9839a11 2536 return k;
75d262c2
VCG
2537 }
2538
2539 return NULL;
2540}
75d262c2 2541
c9839a11 2542struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
04124681 2543 u8 addr_type)
75d262c2 2544{
c9839a11 2545 struct smp_ltk *k;
75d262c2 2546
c9839a11
VCG
2547 list_for_each_entry(k, &hdev->long_term_keys, list)
2548 if (addr_type == k->bdaddr_type &&
a8c5fb1a 2549 bacmp(bdaddr, &k->bdaddr) == 0)
75d262c2
VCG
2550 return k;
2551
2552 return NULL;
2553}
75d262c2 2554
d25e28ab 2555int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
04124681 2556 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
55ed8ca1
JH
2557{
2558 struct link_key *key, *old_key;
745c0ce3
VA
2559 u8 old_key_type;
2560 bool persistent;
55ed8ca1
JH
2561
2562 old_key = hci_find_link_key(hdev, bdaddr);
2563 if (old_key) {
2564 old_key_type = old_key->type;
2565 key = old_key;
2566 } else {
12adcf3a 2567 old_key_type = conn ? conn->key_type : 0xff;
55ed8ca1
JH
2568 key = kzalloc(sizeof(*key), GFP_ATOMIC);
2569 if (!key)
2570 return -ENOMEM;
2571 list_add(&key->list, &hdev->link_keys);
2572 }
2573
6ed93dc6 2574 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 2575
d25e28ab
JH
2576 /* Some buggy controller combinations generate a changed
2577 * combination key for legacy pairing even when there's no
2578 * previous key */
2579 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 2580 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 2581 type = HCI_LK_COMBINATION;
655fe6ec
JH
2582 if (conn)
2583 conn->key_type = type;
2584 }
d25e28ab 2585
55ed8ca1 2586 bacpy(&key->bdaddr, bdaddr);
9b3b4460 2587 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
2588 key->pin_len = pin_len;
2589
b6020ba0 2590 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 2591 key->type = old_key_type;
4748fed2
JH
2592 else
2593 key->type = type;
2594
4df378a1
JH
2595 if (!new_key)
2596 return 0;
2597
2598 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
2599
744cf19e 2600 mgmt_new_link_key(hdev, key, persistent);
4df378a1 2601
6ec5bcad
VA
2602 if (conn)
2603 conn->flush_key = !persistent;
55ed8ca1
JH
2604
2605 return 0;
2606}
2607
c9839a11 2608int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
9a006657 2609 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
04124681 2610 ediv, u8 rand[8])
75d262c2 2611{
c9839a11 2612 struct smp_ltk *key, *old_key;
75d262c2 2613
c9839a11
VCG
2614 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
2615 return 0;
75d262c2 2616
c9839a11
VCG
2617 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
2618 if (old_key)
75d262c2 2619 key = old_key;
c9839a11
VCG
2620 else {
2621 key = kzalloc(sizeof(*key), GFP_ATOMIC);
75d262c2
VCG
2622 if (!key)
2623 return -ENOMEM;
c9839a11 2624 list_add(&key->list, &hdev->long_term_keys);
75d262c2
VCG
2625 }
2626
75d262c2 2627 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
2628 key->bdaddr_type = addr_type;
2629 memcpy(key->val, tk, sizeof(key->val));
2630 key->authenticated = authenticated;
2631 key->ediv = ediv;
2632 key->enc_size = enc_size;
2633 key->type = type;
2634 memcpy(key->rand, rand, sizeof(key->rand));
75d262c2 2635
c9839a11
VCG
2636 if (!new_key)
2637 return 0;
75d262c2 2638
261cc5aa
VCG
2639 if (type & HCI_SMP_LTK)
2640 mgmt_new_ltk(hdev, key, 1);
2641
75d262c2
VCG
2642 return 0;
2643}
2644
55ed8ca1
JH
2645int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2646{
2647 struct link_key *key;
2648
2649 key = hci_find_link_key(hdev, bdaddr);
2650 if (!key)
2651 return -ENOENT;
2652
6ed93dc6 2653 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
2654
2655 list_del(&key->list);
2656 kfree(key);
2657
2658 return 0;
2659}
2660
b899efaf
VCG
2661int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2662{
2663 struct smp_ltk *k, *tmp;
2664
2665 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2666 if (bacmp(bdaddr, &k->bdaddr))
2667 continue;
2668
6ed93dc6 2669 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf
VCG
2670
2671 list_del(&k->list);
2672 kfree(k);
2673 }
2674
2675 return 0;
2676}
2677
6bd32326 2678/* HCI command timer function */
bda4f23a 2679static void hci_cmd_timeout(unsigned long arg)
6bd32326
VT
2680{
2681 struct hci_dev *hdev = (void *) arg;
2682
bda4f23a
AE
2683 if (hdev->sent_cmd) {
2684 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2685 u16 opcode = __le16_to_cpu(sent->opcode);
2686
2687 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2688 } else {
2689 BT_ERR("%s command tx timeout", hdev->name);
2690 }
2691
6bd32326 2692 atomic_set(&hdev->cmd_cnt, 1);
c347b765 2693 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
2694}
2695
2763eda6 2696struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 2697 bdaddr_t *bdaddr)
2763eda6
SJ
2698{
2699 struct oob_data *data;
2700
2701 list_for_each_entry(data, &hdev->remote_oob_data, list)
2702 if (bacmp(bdaddr, &data->bdaddr) == 0)
2703 return data;
2704
2705 return NULL;
2706}
2707
2708int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2709{
2710 struct oob_data *data;
2711
2712 data = hci_find_remote_oob_data(hdev, bdaddr);
2713 if (!data)
2714 return -ENOENT;
2715
6ed93dc6 2716 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
2717
2718 list_del(&data->list);
2719 kfree(data);
2720
2721 return 0;
2722}
2723
2724int hci_remote_oob_data_clear(struct hci_dev *hdev)
2725{
2726 struct oob_data *data, *n;
2727
2728 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2729 list_del(&data->list);
2730 kfree(data);
2731 }
2732
2733 return 0;
2734}
2735
2736int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
04124681 2737 u8 *randomizer)
2763eda6
SJ
2738{
2739 struct oob_data *data;
2740
2741 data = hci_find_remote_oob_data(hdev, bdaddr);
2742
2743 if (!data) {
2744 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2745 if (!data)
2746 return -ENOMEM;
2747
2748 bacpy(&data->bdaddr, bdaddr);
2749 list_add(&data->list, &hdev->remote_oob_data);
2750 }
2751
2752 memcpy(data->hash, hash, sizeof(data->hash));
2753 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2754
6ed93dc6 2755 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
2756
2757 return 0;
2758}
2759
b9ee0a78
MH
2760struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2761 bdaddr_t *bdaddr, u8 type)
b2a66aad 2762{
8035ded4 2763 struct bdaddr_list *b;
b2a66aad 2764
b9ee0a78
MH
2765 list_for_each_entry(b, &hdev->blacklist, list) {
2766 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 2767 return b;
b9ee0a78 2768 }
b2a66aad
AJ
2769
2770 return NULL;
2771}
2772
2773int hci_blacklist_clear(struct hci_dev *hdev)
2774{
2775 struct list_head *p, *n;
2776
2777 list_for_each_safe(p, n, &hdev->blacklist) {
b9ee0a78 2778 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
b2a66aad
AJ
2779
2780 list_del(p);
2781 kfree(b);
2782 }
2783
2784 return 0;
2785}
2786
88c1fe4b 2787int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2788{
2789 struct bdaddr_list *entry;
b2a66aad 2790
b9ee0a78 2791 if (!bacmp(bdaddr, BDADDR_ANY))
b2a66aad
AJ
2792 return -EBADF;
2793
b9ee0a78 2794 if (hci_blacklist_lookup(hdev, bdaddr, type))
5e762444 2795 return -EEXIST;
b2a66aad
AJ
2796
2797 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
2798 if (!entry)
2799 return -ENOMEM;
b2a66aad
AJ
2800
2801 bacpy(&entry->bdaddr, bdaddr);
b9ee0a78 2802 entry->bdaddr_type = type;
b2a66aad
AJ
2803
2804 list_add(&entry->list, &hdev->blacklist);
2805
88c1fe4b 2806 return mgmt_device_blocked(hdev, bdaddr, type);
b2a66aad
AJ
2807}
2808
88c1fe4b 2809int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2810{
2811 struct bdaddr_list *entry;
b2a66aad 2812
b9ee0a78 2813 if (!bacmp(bdaddr, BDADDR_ANY))
5e762444 2814 return hci_blacklist_clear(hdev);
b2a66aad 2815
b9ee0a78 2816 entry = hci_blacklist_lookup(hdev, bdaddr, type);
1ec918ce 2817 if (!entry)
5e762444 2818 return -ENOENT;
b2a66aad
AJ
2819
2820 list_del(&entry->list);
2821 kfree(entry);
2822
88c1fe4b 2823 return mgmt_device_unblocked(hdev, bdaddr, type);
b2a66aad
AJ
2824}
2825
4c87eaab 2826static void inquiry_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 2827{
4c87eaab
AG
2828 if (status) {
2829 BT_ERR("Failed to start inquiry: status %d", status);
7ba8b4be 2830
4c87eaab
AG
2831 hci_dev_lock(hdev);
2832 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2833 hci_dev_unlock(hdev);
2834 return;
2835 }
7ba8b4be
AG
2836}
2837
4c87eaab 2838static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 2839{
4c87eaab
AG
2840 /* General inquiry access code (GIAC) */
2841 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2842 struct hci_request req;
2843 struct hci_cp_inquiry cp;
7ba8b4be
AG
2844 int err;
2845
4c87eaab
AG
2846 if (status) {
2847 BT_ERR("Failed to disable LE scanning: status %d", status);
2848 return;
2849 }
7ba8b4be 2850
4c87eaab
AG
2851 switch (hdev->discovery.type) {
2852 case DISCOV_TYPE_LE:
2853 hci_dev_lock(hdev);
2854 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2855 hci_dev_unlock(hdev);
2856 break;
7ba8b4be 2857
4c87eaab
AG
2858 case DISCOV_TYPE_INTERLEAVED:
2859 hci_req_init(&req, hdev);
7ba8b4be 2860
4c87eaab
AG
2861 memset(&cp, 0, sizeof(cp));
2862 memcpy(&cp.lap, lap, sizeof(cp.lap));
2863 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2864 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
7ba8b4be 2865
4c87eaab 2866 hci_dev_lock(hdev);
7dbfac1d 2867
4c87eaab 2868 hci_inquiry_cache_flush(hdev);
7dbfac1d 2869
4c87eaab
AG
2870 err = hci_req_run(&req, inquiry_complete);
2871 if (err) {
2872 BT_ERR("Inquiry request failed: err %d", err);
2873 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2874 }
7dbfac1d 2875
4c87eaab
AG
2876 hci_dev_unlock(hdev);
2877 break;
7dbfac1d 2878 }
7dbfac1d
AG
2879}
2880
7ba8b4be
AG
2881static void le_scan_disable_work(struct work_struct *work)
2882{
2883 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 2884 le_scan_disable.work);
7ba8b4be 2885 struct hci_cp_le_set_scan_enable cp;
4c87eaab
AG
2886 struct hci_request req;
2887 int err;
7ba8b4be
AG
2888
2889 BT_DBG("%s", hdev->name);
2890
4c87eaab 2891 hci_req_init(&req, hdev);
28b75a89 2892
7ba8b4be 2893 memset(&cp, 0, sizeof(cp));
4c87eaab
AG
2894 cp.enable = LE_SCAN_DISABLE;
2895 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
28b75a89 2896
4c87eaab
AG
2897 err = hci_req_run(&req, le_scan_disable_work_complete);
2898 if (err)
2899 BT_ERR("Disable LE scanning request failed: err %d", err);
28b75a89
AG
2900}
2901
/* Alloc HCI device */
/* Allocate and initialize a new hci_dev with sane defaults: packet types,
 * timing parameters, all internal lists, work items and the command timer.
 * Returns the device or NULL on allocation failure; the caller registers
 * it with hci_register_dev() and releases it with hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Sniff-mode interval bounds in slots (0.625 ms units) */
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* Default LE scan and connection timing parameters */
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	/* Watchdog for commands the controller never answers */
	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
2961
/* Free HCI device */
/* Drop the sysfs device reference; the hci_dev memory is freed by the
 * device release callback once the last reference goes away.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
2969
/* Register HCI device */
/* Register @hdev with the HCI core: allocate an index, create work
 * queues, debugfs and sysfs entries, hook up rfkill and schedule the
 * initial power-on. Returns the assigned index (>= 0) or a negative
 * errno; on failure all partially acquired resources are released.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Ordered, high-priority workqueues: one for RX/TX/command
	 * processing, one for synchronous request handling.
	 */
	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is optional: registration failure just disables it */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init.
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
3067
/* Unregister HCI device */
/* Tear down @hdev in the reverse order of registration: remove it from
 * the device list, close it, notify mgmt, drop rfkill/sysfs/debugfs,
 * destroy the workqueues, clear all stored keys and data, and finally
 * release the index and the registration reference.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Stop new work from being queued against this device */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled packets */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Discard all persisted security material and filters */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
3128
/* Suspend HCI device */
/* Notify interested parties that @hdev is suspending. Always returns 0. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
3136
/* Resume HCI device */
/* Notify interested parties that @hdev is resuming. Always returns 0. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
3144
76bca880 3145/* Receive frame from HCI drivers */
e1a26170 3146int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 3147{
76bca880 3148 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 3149 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
3150 kfree_skb(skb);
3151 return -ENXIO;
3152 }
3153
d82603c6 3154 /* Incoming skb */
76bca880
MH
3155 bt_cb(skb)->incoming = 1;
3156
3157 /* Time stamp */
3158 __net_timestamp(skb);
3159
76bca880 3160 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 3161 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 3162
76bca880
MH
3163 return 0;
3164}
3165EXPORT_SYMBOL(hci_recv_frame);
3166
33e882a5 3167static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 3168 int count, __u8 index)
33e882a5
SS
3169{
3170 int len = 0;
3171 int hlen = 0;
3172 int remain = count;
3173 struct sk_buff *skb;
3174 struct bt_skb_cb *scb;
3175
3176 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 3177 index >= NUM_REASSEMBLY)
33e882a5
SS
3178 return -EILSEQ;
3179
3180 skb = hdev->reassembly[index];
3181
3182 if (!skb) {
3183 switch (type) {
3184 case HCI_ACLDATA_PKT:
3185 len = HCI_MAX_FRAME_SIZE;
3186 hlen = HCI_ACL_HDR_SIZE;
3187 break;
3188 case HCI_EVENT_PKT:
3189 len = HCI_MAX_EVENT_SIZE;
3190 hlen = HCI_EVENT_HDR_SIZE;
3191 break;
3192 case HCI_SCODATA_PKT:
3193 len = HCI_MAX_SCO_SIZE;
3194 hlen = HCI_SCO_HDR_SIZE;
3195 break;
3196 }
3197
1e429f38 3198 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
3199 if (!skb)
3200 return -ENOMEM;
3201
3202 scb = (void *) skb->cb;
3203 scb->expect = hlen;
3204 scb->pkt_type = type;
3205
33e882a5
SS
3206 hdev->reassembly[index] = skb;
3207 }
3208
3209 while (count) {
3210 scb = (void *) skb->cb;
89bb46d0 3211 len = min_t(uint, scb->expect, count);
33e882a5
SS
3212
3213 memcpy(skb_put(skb, len), data, len);
3214
3215 count -= len;
3216 data += len;
3217 scb->expect -= len;
3218 remain = count;
3219
3220 switch (type) {
3221 case HCI_EVENT_PKT:
3222 if (skb->len == HCI_EVENT_HDR_SIZE) {
3223 struct hci_event_hdr *h = hci_event_hdr(skb);
3224 scb->expect = h->plen;
3225
3226 if (skb_tailroom(skb) < scb->expect) {
3227 kfree_skb(skb);
3228 hdev->reassembly[index] = NULL;
3229 return -ENOMEM;
3230 }
3231 }
3232 break;
3233
3234 case HCI_ACLDATA_PKT:
3235 if (skb->len == HCI_ACL_HDR_SIZE) {
3236 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3237 scb->expect = __le16_to_cpu(h->dlen);
3238
3239 if (skb_tailroom(skb) < scb->expect) {
3240 kfree_skb(skb);
3241 hdev->reassembly[index] = NULL;
3242 return -ENOMEM;
3243 }
3244 }
3245 break;
3246
3247 case HCI_SCODATA_PKT:
3248 if (skb->len == HCI_SCO_HDR_SIZE) {
3249 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3250 scb->expect = h->dlen;
3251
3252 if (skb_tailroom(skb) < scb->expect) {
3253 kfree_skb(skb);
3254 hdev->reassembly[index] = NULL;
3255 return -ENOMEM;
3256 }
3257 }
3258 break;
3259 }
3260
3261 if (scb->expect == 0) {
3262 /* Complete frame */
3263
3264 bt_cb(skb)->pkt_type = type;
e1a26170 3265 hci_recv_frame(hdev, skb);
33e882a5
SS
3266
3267 hdev->reassembly[index] = NULL;
3268 return remain;
3269 }
3270 }
3271
3272 return remain;
3273}
3274
/* Driver entry point for typed packet fragments.  Feeds @count bytes
 * of @data into the reassembly slot for @type (slot index type - 1)
 * until everything is consumed or an error occurs.  Returns the last
 * hci_reassembly() result: leftover byte count or a negative errno.
 */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		/* Advance past the bytes hci_reassembly() consumed */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
3294
/* Dedicated reassembly slot for byte-stream transports (e.g. UART),
 * where the packet type indicator is in-band as the first byte.
 */
#define STREAM_REASSEMBLY 0

/* Driver entry point for untyped byte streams.  The first byte of each
 * frame is the HCI packet type indicator; subsequent calls continue the
 * packet currently being reassembled.  Returns leftover byte count or
 * a negative errno.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame: consume the type byte */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
3329
1da177e4
LT
3330/* ---- Interface to upper protocols ---- */
3331
/* Register an upper-protocol callback structure on the global HCI
 * callback list.  Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
3343
/* Remove a previously registered upper-protocol callback structure
 * from the global HCI callback list.  Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
3355
/* Hand one HCI packet to the transport driver, mirroring it to the
 * monitor channel and, in promiscuous mode, to raw HCI sockets.
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	/* Errors from the driver are only logged; there is no retry here */
	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}
3377
/* Initialize an asynchronous HCI request: empty command queue, bound
 * to @hdev, with no accumulated build error.
 */
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}
3384
/* Submit a built request: tag its last command with the @complete
 * callback, splice all queued commands onto the device command queue
 * and kick the command worker.  Returns 0 on success, the build error
 * if one occurred, or -ENODATA for an empty request.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	/* The completion callback rides on the last command of the request */
	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	/* Splice atomically with respect to the command worker */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
3416
/* Allocate and fill an skb holding one HCI command packet: command
 * header (opcode + parameter length) followed by @plen bytes of
 * @param.  Returns NULL on allocation failure.
 */
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;

	return skb;
}
3441
/* Send HCI command */

/* Build a stand-alone HCI command and queue it on the device command
 * queue.  Returns 0 on success or -ENOMEM if the skb allocation fails.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
1da177e4 3466
71c76a17 3467/* Queue a command to an asynchronous HCI request */
07dc93dd
JH
3468void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
3469 const void *param, u8 event)
71c76a17
JH
3470{
3471 struct hci_dev *hdev = req->hdev;
3472 struct sk_buff *skb;
3473
3474 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3475
34739c1e
AG
3476 /* If an error occured during request building, there is no point in
3477 * queueing the HCI command. We can simply return.
3478 */
3479 if (req->err)
3480 return;
3481
71c76a17
JH
3482 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3483 if (!skb) {
5d73e034
AG
3484 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
3485 hdev->name, opcode);
3486 req->err = -ENOMEM;
e348fe6b 3487 return;
71c76a17
JH
3488 }
3489
3490 if (skb_queue_empty(&req->cmd_q))
3491 bt_cb(skb)->req.start = true;
3492
02350a72
JH
3493 bt_cb(skb)->req.event = event;
3494
71c76a17 3495 skb_queue_tail(&req->cmd_q, skb);
71c76a17
JH
3496}
3497
/* Queue a command on a request with no special completion event
 * (event 0 = complete on the usual command status/complete event).
 */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
3503
1da177e4 3504/* Get data from the previously sent command */
a9de9248 3505void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
3506{
3507 struct hci_command_hdr *hdr;
3508
3509 if (!hdev->sent_cmd)
3510 return NULL;
3511
3512 hdr = (void *) hdev->sent_cmd->data;
3513
a9de9248 3514 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
3515 return NULL;
3516
f0e09510 3517 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
3518
3519 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3520}
3521
3522/* Send ACL data */
3523static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3524{
3525 struct hci_acl_hdr *hdr;
3526 int len = skb->len;
3527
badff6d0
ACM
3528 skb_push(skb, HCI_ACL_HDR_SIZE);
3529 skb_reset_transport_header(skb);
9c70220b 3530 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
3531 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3532 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
3533}
3534
/* Add ACL headers to @skb (and each fragment on its frag_list) and
 * queue the result on @queue.  Fragments after the first are re-marked
 * as ACL_CONT and queued atomically under the queue lock so the frame
 * is never interleaved with other traffic.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	/* AMP controllers address the logical channel, BR/EDR the
	 * connection handle.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, never ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
/* Queue ACL data on the channel's data queue and schedule the TX
 * worker to push it out.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
3603
/* Send SCO data */

/* Prepend a SCO header to @skb, queue it on the connection's data
 * queue and schedule the TX worker.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
3624
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */

/* Pick the connection of @type with pending data that has the fewest
 * in-flight packets (fair scheduling), and compute a send quota for it
 * from the controller's free buffer count.  Returns the chosen
 * connection (or NULL) and writes the quota to *quote.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Prefer the connection with the fewest packets in flight */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type have been seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * share the ACL one.
			 */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Split the free buffers across contenders; at least one */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
3687
/* TX timeout handler: disconnect every connection of @type that still
 * has unacknowledged packets, since the controller has stopped
 * returning buffer credits for them.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
3708
/* Channel-aware scheduler: among all channels on connections of @type,
 * pick one whose head packet has the highest priority, breaking ties
 * by fewest packets in flight on the owning connection.  Returns the
 * chosen channel (or NULL) and writes its send quota to *quote.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* A higher priority restarts the fairness scan */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Tie-break on the connection's in-flight count */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Split the free buffers across contenders; at least one */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
3789}
3790
/* Anti-starvation pass: after a scheduling round, promote the head
 * packet of every channel of @type that sent nothing to just below the
 * maximum priority, and reset the per-round sent counters of channels
 * that did send.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got to send this round: just reset */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
3840
/* Number of controller data blocks consumed by @skb's payload
 * (header excluded), for block-based flow control.
 */
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
3846
/* Detect a stalled ACL link: if the controller has returned no buffer
 * credits (@cnt == 0) since the timeout elapsed, kill the stalled
 * connections.  Skipped entirely for raw-mode devices.
 */
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
1da177e4 3857
/* Packet-based ACL scheduler: drain channels by priority/fairness
 * while controller ACL buffer credits remain, then rebalance channel
 * priorities if anything was sent.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
3895
/* Block-based ACL scheduler (AMP-style flow control): like the packet
 * scheduler but each packet consumes a variable number of controller
 * data blocks, decremented from hdev->block_cnt.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* Packet does not fit in the remaining blocks;
			 * stop the whole scheduling pass.
			 */
			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
3949
/* Dispatch ACL scheduling to the packet- or block-based variant
 * according to the controller's flow control mode.
 */
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
3972
/* Schedule SCO */

/* Round-robin SCO scheduler: send whole queues per connection while
 * SCO buffer credits remain.
 */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			/* Wrap the per-connection sent counter */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
3996
/* eSCO scheduler: identical to the SCO scheduler but iterates
 * ESCO_LINK connections (shares the SCO buffer credit pool).
 */
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			/* Wrap the per-connection sent counter */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
4020
/* LE scheduler: drain LE channels by priority/fairness.  Controllers
 * without a dedicated LE buffer pool (le_pkts == 0) borrow ACL
 * credits, so the leftover count is written back to the right pool.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Return unused credits to the pool they were taken from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
4071
/* TX work item: run all per-link-type schedulers (unless the device is
 * owned by a user channel) and flush the raw packet queue.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
4092
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */

/* Process an incoming ACL data packet: resolve the connection from the
 * handle and pass the payload up to L2CAP; drop it (with an error log)
 * if the handle is unknown.
 */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* Handle field carries both the handle and the boundary flags */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
4130
/* SCO data packet */

/* Process an incoming SCO data packet: resolve the connection from the
 * handle and pass the payload up to the SCO layer; drop it (with an
 * error log) if the handle is unknown.
 */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
4161
9238f36a
JH
4162static bool hci_req_is_complete(struct hci_dev *hdev)
4163{
4164 struct sk_buff *skb;
4165
4166 skb = skb_peek(&hdev->cmd_q);
4167 if (!skb)
4168 return true;
4169
4170 return bt_cb(skb)->req.start;
4171}
4172
/* Requeue a clone of the last sent command at the head of the command
 * queue, unless there is none or it was HCI_Reset (which must not be
 * repeated).
 */
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
4194
/* Called when the command with @opcode finished with @status.  Decides
 * whether the request it belongs to is now complete and, if so, finds
 * and invokes the request's completion callback.  On failure, any
 * remaining commands of the same request are flushed from the queue.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* Stop at the start of the next request */
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
4260
/* RX work item: drain the receive queue, mirroring each packet to the
 * monitor (and promiscuous sockets), then dispatch by packet type to
 * the event/ACL/SCO handlers.  Raw and user-channel devices bypass the
 * stack entirely.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
4316
/* Command work item: if a command credit is available, dequeue the
 * next command, remember a clone of it in hdev->sent_cmd (for
 * hci_sent_cmd_data() / resend), send it and arm the command timer.
 * If the clone allocation fails the command is requeued and the work
 * rescheduled.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously kept copy before storing the new one */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			/* During reset the timeout is handled elsewhere;
			 * otherwise (re)arm the command timer.
			 */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
This page took 1.106192 seconds and 5 git commands to generate.