Bluetooth: Don't write static address during power on
[deliverable/linux.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
8c520a59 28#include <linux/export.h>
3df92b31 29#include <linux/idr.h>
8c520a59 30#include <linux/rfkill.h>
baf27f6e 31#include <linux/debugfs.h>
99780a7b 32#include <linux/crypto.h>
47219839 33#include <asm/unaligned.h>
1da177e4
LT
34
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
37
970c4e46
JH
38#include "smp.h"
39
b78752cc 40static void hci_rx_work(struct work_struct *work);
c347b765 41static void hci_cmd_work(struct work_struct *work);
3eff45ea 42static void hci_tx_work(struct work_struct *work);
1da177e4 43
1da177e4
LT
44/* HCI device list */
45LIST_HEAD(hci_dev_list);
46DEFINE_RWLOCK(hci_dev_list_lock);
47
48/* HCI callback list */
49LIST_HEAD(hci_cb_list);
50DEFINE_RWLOCK(hci_cb_list_lock);
51
3df92b31
SL
52/* HCI ID Numbering */
53static DEFINE_IDA(hci_index_ida);
54
1da177e4
LT
55/* ---- HCI notifications ---- */
56
6516455d 57static void hci_notify(struct hci_dev *hdev, int event)
1da177e4 58{
040030ef 59 hci_sock_dev_event(hdev, event);
1da177e4
LT
60}
61
baf27f6e
MH
62/* ---- HCI debugfs entries ---- */
63
4b4148e9
MH
64static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
65 size_t count, loff_t *ppos)
66{
67 struct hci_dev *hdev = file->private_data;
68 char buf[3];
69
70 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
71 buf[1] = '\n';
72 buf[2] = '\0';
73 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
74}
75
76static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
77 size_t count, loff_t *ppos)
78{
79 struct hci_dev *hdev = file->private_data;
80 struct sk_buff *skb;
81 char buf[32];
82 size_t buf_size = min(count, (sizeof(buf)-1));
83 bool enable;
84 int err;
85
86 if (!test_bit(HCI_UP, &hdev->flags))
87 return -ENETDOWN;
88
89 if (copy_from_user(buf, user_buf, buf_size))
90 return -EFAULT;
91
92 buf[buf_size] = '\0';
93 if (strtobool(buf, &enable))
94 return -EINVAL;
95
96 if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
97 return -EALREADY;
98
99 hci_req_lock(hdev);
100 if (enable)
101 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
102 HCI_CMD_TIMEOUT);
103 else
104 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
105 HCI_CMD_TIMEOUT);
106 hci_req_unlock(hdev);
107
108 if (IS_ERR(skb))
109 return PTR_ERR(skb);
110
111 err = -bt_to_errno(skb->data[0]);
112 kfree_skb(skb);
113
114 if (err < 0)
115 return err;
116
117 change_bit(HCI_DUT_MODE, &hdev->dev_flags);
118
119 return count;
120}
121
122static const struct file_operations dut_mode_fops = {
123 .open = simple_open,
124 .read = dut_mode_read,
125 .write = dut_mode_write,
126 .llseek = default_llseek,
127};
128
dfb826a8
MH
129static int features_show(struct seq_file *f, void *ptr)
130{
131 struct hci_dev *hdev = f->private;
132 u8 p;
133
134 hci_dev_lock(hdev);
135 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
cfbb2b5b 136 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
dfb826a8
MH
137 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
138 hdev->features[p][0], hdev->features[p][1],
139 hdev->features[p][2], hdev->features[p][3],
140 hdev->features[p][4], hdev->features[p][5],
141 hdev->features[p][6], hdev->features[p][7]);
142 }
cfbb2b5b
MH
143 if (lmp_le_capable(hdev))
144 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
145 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
146 hdev->le_features[0], hdev->le_features[1],
147 hdev->le_features[2], hdev->le_features[3],
148 hdev->le_features[4], hdev->le_features[5],
149 hdev->le_features[6], hdev->le_features[7]);
dfb826a8
MH
150 hci_dev_unlock(hdev);
151
152 return 0;
153}
154
155static int features_open(struct inode *inode, struct file *file)
156{
157 return single_open(file, features_show, inode->i_private);
158}
159
160static const struct file_operations features_fops = {
161 .open = features_open,
162 .read = seq_read,
163 .llseek = seq_lseek,
164 .release = single_release,
165};
166
70afe0b8
MH
167static int blacklist_show(struct seq_file *f, void *p)
168{
169 struct hci_dev *hdev = f->private;
170 struct bdaddr_list *b;
171
172 hci_dev_lock(hdev);
173 list_for_each_entry(b, &hdev->blacklist, list)
b25f0785 174 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
70afe0b8
MH
175 hci_dev_unlock(hdev);
176
177 return 0;
178}
179
180static int blacklist_open(struct inode *inode, struct file *file)
181{
182 return single_open(file, blacklist_show, inode->i_private);
183}
184
185static const struct file_operations blacklist_fops = {
186 .open = blacklist_open,
187 .read = seq_read,
188 .llseek = seq_lseek,
189 .release = single_release,
190};
191
47219839
MH
192static int uuids_show(struct seq_file *f, void *p)
193{
194 struct hci_dev *hdev = f->private;
195 struct bt_uuid *uuid;
196
197 hci_dev_lock(hdev);
198 list_for_each_entry(uuid, &hdev->uuids, list) {
58f01aa9
MH
199 u8 i, val[16];
200
201 /* The Bluetooth UUID values are stored in big endian,
202 * but with reversed byte order. So convert them into
203 * the right order for the %pUb modifier.
204 */
205 for (i = 0; i < 16; i++)
206 val[i] = uuid->uuid[15 - i];
207
208 seq_printf(f, "%pUb\n", val);
47219839
MH
209 }
210 hci_dev_unlock(hdev);
211
212 return 0;
213}
214
215static int uuids_open(struct inode *inode, struct file *file)
216{
217 return single_open(file, uuids_show, inode->i_private);
218}
219
220static const struct file_operations uuids_fops = {
221 .open = uuids_open,
222 .read = seq_read,
223 .llseek = seq_lseek,
224 .release = single_release,
225};
226
baf27f6e
MH
227static int inquiry_cache_show(struct seq_file *f, void *p)
228{
229 struct hci_dev *hdev = f->private;
230 struct discovery_state *cache = &hdev->discovery;
231 struct inquiry_entry *e;
232
233 hci_dev_lock(hdev);
234
235 list_for_each_entry(e, &cache->all, all) {
236 struct inquiry_data *data = &e->data;
237 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
238 &data->bdaddr,
239 data->pscan_rep_mode, data->pscan_period_mode,
240 data->pscan_mode, data->dev_class[2],
241 data->dev_class[1], data->dev_class[0],
242 __le16_to_cpu(data->clock_offset),
243 data->rssi, data->ssp_mode, e->timestamp);
244 }
245
246 hci_dev_unlock(hdev);
247
248 return 0;
249}
250
251static int inquiry_cache_open(struct inode *inode, struct file *file)
252{
253 return single_open(file, inquiry_cache_show, inode->i_private);
254}
255
256static const struct file_operations inquiry_cache_fops = {
257 .open = inquiry_cache_open,
258 .read = seq_read,
259 .llseek = seq_lseek,
260 .release = single_release,
261};
262
02d08d15
MH
263static int link_keys_show(struct seq_file *f, void *ptr)
264{
265 struct hci_dev *hdev = f->private;
266 struct list_head *p, *n;
267
268 hci_dev_lock(hdev);
269 list_for_each_safe(p, n, &hdev->link_keys) {
270 struct link_key *key = list_entry(p, struct link_key, list);
271 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
272 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
273 }
274 hci_dev_unlock(hdev);
275
276 return 0;
277}
278
279static int link_keys_open(struct inode *inode, struct file *file)
280{
281 return single_open(file, link_keys_show, inode->i_private);
282}
283
284static const struct file_operations link_keys_fops = {
285 .open = link_keys_open,
286 .read = seq_read,
287 .llseek = seq_lseek,
288 .release = single_release,
289};
290
babdbb3c
MH
291static int dev_class_show(struct seq_file *f, void *ptr)
292{
293 struct hci_dev *hdev = f->private;
294
295 hci_dev_lock(hdev);
296 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
297 hdev->dev_class[1], hdev->dev_class[0]);
298 hci_dev_unlock(hdev);
299
300 return 0;
301}
302
303static int dev_class_open(struct inode *inode, struct file *file)
304{
305 return single_open(file, dev_class_show, inode->i_private);
306}
307
308static const struct file_operations dev_class_fops = {
309 .open = dev_class_open,
310 .read = seq_read,
311 .llseek = seq_lseek,
312 .release = single_release,
313};
314
041000b9
MH
/* debugfs "voice_setting": read-only view of the SCO voice setting. */
static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

/* No set operation: the voice setting is controlled by the core/drivers. */
DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");
328
ebd1e33b
MH
329static int auto_accept_delay_set(void *data, u64 val)
330{
331 struct hci_dev *hdev = data;
332
333 hci_dev_lock(hdev);
334 hdev->auto_accept_delay = val;
335 hci_dev_unlock(hdev);
336
337 return 0;
338}
339
340static int auto_accept_delay_get(void *data, u64 *val)
341{
342 struct hci_dev *hdev = data;
343
344 hci_dev_lock(hdev);
345 *val = hdev->auto_accept_delay;
346 hci_dev_unlock(hdev);
347
348 return 0;
349}
350
351DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
352 auto_accept_delay_set, "%llu\n");
353
06f5b778
MH
354static int ssp_debug_mode_set(void *data, u64 val)
355{
356 struct hci_dev *hdev = data;
357 struct sk_buff *skb;
358 __u8 mode;
359 int err;
360
361 if (val != 0 && val != 1)
362 return -EINVAL;
363
364 if (!test_bit(HCI_UP, &hdev->flags))
365 return -ENETDOWN;
366
367 hci_req_lock(hdev);
368 mode = val;
369 skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
370 &mode, HCI_CMD_TIMEOUT);
371 hci_req_unlock(hdev);
372
373 if (IS_ERR(skb))
374 return PTR_ERR(skb);
375
376 err = -bt_to_errno(skb->data[0]);
377 kfree_skb(skb);
378
379 if (err < 0)
380 return err;
381
382 hci_dev_lock(hdev);
383 hdev->ssp_debug_mode = val;
384 hci_dev_unlock(hdev);
385
386 return 0;
387}
388
389static int ssp_debug_mode_get(void *data, u64 *val)
390{
391 struct hci_dev *hdev = data;
392
393 hci_dev_lock(hdev);
394 *val = hdev->ssp_debug_mode;
395 hci_dev_unlock(hdev);
396
397 return 0;
398}
399
400DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
401 ssp_debug_mode_set, "%llu\n");
402
5afeac14
MH
403static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
404 size_t count, loff_t *ppos)
405{
406 struct hci_dev *hdev = file->private_data;
407 char buf[3];
408
409 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y': 'N';
410 buf[1] = '\n';
411 buf[2] = '\0';
412 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
413}
414
415static ssize_t force_sc_support_write(struct file *file,
416 const char __user *user_buf,
417 size_t count, loff_t *ppos)
418{
419 struct hci_dev *hdev = file->private_data;
420 char buf[32];
421 size_t buf_size = min(count, (sizeof(buf)-1));
422 bool enable;
423
424 if (test_bit(HCI_UP, &hdev->flags))
425 return -EBUSY;
426
427 if (copy_from_user(buf, user_buf, buf_size))
428 return -EFAULT;
429
430 buf[buf_size] = '\0';
431 if (strtobool(buf, &enable))
432 return -EINVAL;
433
434 if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
435 return -EALREADY;
436
437 change_bit(HCI_FORCE_SC, &hdev->dev_flags);
438
439 return count;
440}
441
442static const struct file_operations force_sc_support_fops = {
443 .open = simple_open,
444 .read = force_sc_support_read,
445 .write = force_sc_support_write,
446 .llseek = default_llseek,
447};
448
134c2a89
MH
449static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
450 size_t count, loff_t *ppos)
451{
452 struct hci_dev *hdev = file->private_data;
453 char buf[3];
454
455 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
456 buf[1] = '\n';
457 buf[2] = '\0';
458 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
459}
460
461static const struct file_operations sc_only_mode_fops = {
462 .open = simple_open,
463 .read = sc_only_mode_read,
464 .llseek = default_llseek,
465};
466
2bfa3531
MH
467static int idle_timeout_set(void *data, u64 val)
468{
469 struct hci_dev *hdev = data;
470
471 if (val != 0 && (val < 500 || val > 3600000))
472 return -EINVAL;
473
474 hci_dev_lock(hdev);
2be48b65 475 hdev->idle_timeout = val;
2bfa3531
MH
476 hci_dev_unlock(hdev);
477
478 return 0;
479}
480
481static int idle_timeout_get(void *data, u64 *val)
482{
483 struct hci_dev *hdev = data;
484
485 hci_dev_lock(hdev);
486 *val = hdev->idle_timeout;
487 hci_dev_unlock(hdev);
488
489 return 0;
490}
491
492DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
493 idle_timeout_set, "%llu\n");
494
495static int sniff_min_interval_set(void *data, u64 val)
496{
497 struct hci_dev *hdev = data;
498
499 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
500 return -EINVAL;
501
502 hci_dev_lock(hdev);
2be48b65 503 hdev->sniff_min_interval = val;
2bfa3531
MH
504 hci_dev_unlock(hdev);
505
506 return 0;
507}
508
509static int sniff_min_interval_get(void *data, u64 *val)
510{
511 struct hci_dev *hdev = data;
512
513 hci_dev_lock(hdev);
514 *val = hdev->sniff_min_interval;
515 hci_dev_unlock(hdev);
516
517 return 0;
518}
519
520DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
521 sniff_min_interval_set, "%llu\n");
522
523static int sniff_max_interval_set(void *data, u64 val)
524{
525 struct hci_dev *hdev = data;
526
527 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
528 return -EINVAL;
529
530 hci_dev_lock(hdev);
2be48b65 531 hdev->sniff_max_interval = val;
2bfa3531
MH
532 hci_dev_unlock(hdev);
533
534 return 0;
535}
536
537static int sniff_max_interval_get(void *data, u64 *val)
538{
539 struct hci_dev *hdev = data;
540
541 hci_dev_lock(hdev);
542 *val = hdev->sniff_max_interval;
543 hci_dev_unlock(hdev);
544
545 return 0;
546}
547
548DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
549 sniff_max_interval_set, "%llu\n");
550
7a4cd51d
MH
551static int random_address_show(struct seq_file *f, void *p)
552{
553 struct hci_dev *hdev = f->private;
554
555 hci_dev_lock(hdev);
556 seq_printf(f, "%pMR\n", &hdev->random_addr);
557 hci_dev_unlock(hdev);
558
559 return 0;
560}
561
562static int random_address_open(struct inode *inode, struct file *file)
563{
564 return single_open(file, random_address_show, inode->i_private);
565}
566
567static const struct file_operations random_address_fops = {
568 .open = random_address_open,
569 .read = seq_read,
570 .llseek = seq_lseek,
571 .release = single_release,
572};
573
e7b8fc92
MH
574static int static_address_show(struct seq_file *f, void *p)
575{
576 struct hci_dev *hdev = f->private;
577
578 hci_dev_lock(hdev);
579 seq_printf(f, "%pMR\n", &hdev->static_addr);
580 hci_dev_unlock(hdev);
581
582 return 0;
583}
584
585static int static_address_open(struct inode *inode, struct file *file)
586{
587 return single_open(file, static_address_show, inode->i_private);
588}
589
590static const struct file_operations static_address_fops = {
591 .open = static_address_open,
592 .read = seq_read,
593 .llseek = seq_lseek,
594 .release = single_release,
595};
596
b32bba6c
MH
597static ssize_t force_static_address_read(struct file *file,
598 char __user *user_buf,
599 size_t count, loff_t *ppos)
92202185 600{
b32bba6c
MH
601 struct hci_dev *hdev = file->private_data;
602 char buf[3];
92202185 603
b32bba6c
MH
604 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ? 'Y': 'N';
605 buf[1] = '\n';
606 buf[2] = '\0';
607 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
92202185
MH
608}
609
b32bba6c
MH
610static ssize_t force_static_address_write(struct file *file,
611 const char __user *user_buf,
612 size_t count, loff_t *ppos)
92202185 613{
b32bba6c
MH
614 struct hci_dev *hdev = file->private_data;
615 char buf[32];
616 size_t buf_size = min(count, (sizeof(buf)-1));
617 bool enable;
92202185 618
b32bba6c
MH
619 if (test_bit(HCI_UP, &hdev->flags))
620 return -EBUSY;
92202185 621
b32bba6c
MH
622 if (copy_from_user(buf, user_buf, buf_size))
623 return -EFAULT;
624
625 buf[buf_size] = '\0';
626 if (strtobool(buf, &enable))
627 return -EINVAL;
628
629 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags))
630 return -EALREADY;
631
632 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags);
633
634 return count;
92202185
MH
635}
636
b32bba6c
MH
637static const struct file_operations force_static_address_fops = {
638 .open = simple_open,
639 .read = force_static_address_read,
640 .write = force_static_address_write,
641 .llseek = default_llseek,
642};
92202185 643
3698d704
MH
644static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
645{
646 struct hci_dev *hdev = f->private;
647 struct list_head *p, *n;
648
649 hci_dev_lock(hdev);
650 list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
651 struct smp_irk *irk = list_entry(p, struct smp_irk, list);
652 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
653 &irk->bdaddr, irk->addr_type,
654 16, irk->val, &irk->rpa);
655 }
656 hci_dev_unlock(hdev);
657
658 return 0;
659}
660
661static int identity_resolving_keys_open(struct inode *inode, struct file *file)
662{
663 return single_open(file, identity_resolving_keys_show,
664 inode->i_private);
665}
666
667static const struct file_operations identity_resolving_keys_fops = {
668 .open = identity_resolving_keys_open,
669 .read = seq_read,
670 .llseek = seq_lseek,
671 .release = single_release,
672};
673
8f8625cd
MH
674static int long_term_keys_show(struct seq_file *f, void *ptr)
675{
676 struct hci_dev *hdev = f->private;
677 struct list_head *p, *n;
678
679 hci_dev_lock(hdev);
f813f1be 680 list_for_each_safe(p, n, &hdev->long_term_keys) {
8f8625cd 681 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
f813f1be 682 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %*phN %*phN\n",
8f8625cd
MH
683 &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
684 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
685 8, ltk->rand, 16, ltk->val);
686 }
687 hci_dev_unlock(hdev);
688
689 return 0;
690}
691
692static int long_term_keys_open(struct inode *inode, struct file *file)
693{
694 return single_open(file, long_term_keys_show, inode->i_private);
695}
696
697static const struct file_operations long_term_keys_fops = {
698 .open = long_term_keys_open,
699 .read = seq_read,
700 .llseek = seq_lseek,
701 .release = single_release,
702};
703
4e70c7e7
MH
704static int conn_min_interval_set(void *data, u64 val)
705{
706 struct hci_dev *hdev = data;
707
708 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
709 return -EINVAL;
710
711 hci_dev_lock(hdev);
2be48b65 712 hdev->le_conn_min_interval = val;
4e70c7e7
MH
713 hci_dev_unlock(hdev);
714
715 return 0;
716}
717
718static int conn_min_interval_get(void *data, u64 *val)
719{
720 struct hci_dev *hdev = data;
721
722 hci_dev_lock(hdev);
723 *val = hdev->le_conn_min_interval;
724 hci_dev_unlock(hdev);
725
726 return 0;
727}
728
729DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
730 conn_min_interval_set, "%llu\n");
731
732static int conn_max_interval_set(void *data, u64 val)
733{
734 struct hci_dev *hdev = data;
735
736 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
737 return -EINVAL;
738
739 hci_dev_lock(hdev);
2be48b65 740 hdev->le_conn_max_interval = val;
4e70c7e7
MH
741 hci_dev_unlock(hdev);
742
743 return 0;
744}
745
746static int conn_max_interval_get(void *data, u64 *val)
747{
748 struct hci_dev *hdev = data;
749
750 hci_dev_lock(hdev);
751 *val = hdev->le_conn_max_interval;
752 hci_dev_unlock(hdev);
753
754 return 0;
755}
756
757DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
758 conn_max_interval_set, "%llu\n");
759
3f959d46
MH
760static int adv_channel_map_set(void *data, u64 val)
761{
762 struct hci_dev *hdev = data;
763
764 if (val < 0x01 || val > 0x07)
765 return -EINVAL;
766
767 hci_dev_lock(hdev);
768 hdev->le_adv_channel_map = val;
769 hci_dev_unlock(hdev);
770
771 return 0;
772}
773
774static int adv_channel_map_get(void *data, u64 *val)
775{
776 struct hci_dev *hdev = data;
777
778 hci_dev_lock(hdev);
779 *val = hdev->le_adv_channel_map;
780 hci_dev_unlock(hdev);
781
782 return 0;
783}
784
785DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
786 adv_channel_map_set, "%llu\n");
787
89863109
JR
788static ssize_t lowpan_read(struct file *file, char __user *user_buf,
789 size_t count, loff_t *ppos)
790{
791 struct hci_dev *hdev = file->private_data;
792 char buf[3];
793
794 buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
795 buf[1] = '\n';
796 buf[2] = '\0';
797 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
798}
799
800static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
801 size_t count, loff_t *position)
802{
803 struct hci_dev *hdev = fp->private_data;
804 bool enable;
805 char buf[32];
806 size_t buf_size = min(count, (sizeof(buf)-1));
807
808 if (copy_from_user(buf, user_buffer, buf_size))
809 return -EFAULT;
810
811 buf[buf_size] = '\0';
812
813 if (strtobool(buf, &enable) < 0)
814 return -EINVAL;
815
816 if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
817 return -EALREADY;
818
819 change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);
820
821 return count;
822}
823
824static const struct file_operations lowpan_debugfs_fops = {
825 .open = simple_open,
826 .read = lowpan_read,
827 .write = lowpan_write,
828 .llseek = default_llseek,
829};
830
1da177e4
LT
831/* ---- HCI requests ---- */
832
42c6b129 833static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
1da177e4 834{
42c6b129 835 BT_DBG("%s result 0x%2.2x", hdev->name, result);
1da177e4
LT
836
837 if (hdev->req_status == HCI_REQ_PEND) {
838 hdev->req_result = result;
839 hdev->req_status = HCI_REQ_DONE;
840 wake_up_interruptible(&hdev->req_wait_q);
841 }
842}
843
844static void hci_req_cancel(struct hci_dev *hdev, int err)
845{
846 BT_DBG("%s err 0x%2.2x", hdev->name, err);
847
848 if (hdev->req_status == HCI_REQ_PEND) {
849 hdev->req_result = err;
850 hdev->req_status = HCI_REQ_CANCELED;
851 wake_up_interruptible(&hdev->req_wait_q);
852 }
853}
854
77a63e0a
FW
855static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
856 u8 event)
75e84b7c
JH
857{
858 struct hci_ev_cmd_complete *ev;
859 struct hci_event_hdr *hdr;
860 struct sk_buff *skb;
861
862 hci_dev_lock(hdev);
863
864 skb = hdev->recv_evt;
865 hdev->recv_evt = NULL;
866
867 hci_dev_unlock(hdev);
868
869 if (!skb)
870 return ERR_PTR(-ENODATA);
871
872 if (skb->len < sizeof(*hdr)) {
873 BT_ERR("Too short HCI event");
874 goto failed;
875 }
876
877 hdr = (void *) skb->data;
878 skb_pull(skb, HCI_EVENT_HDR_SIZE);
879
7b1abbbe
JH
880 if (event) {
881 if (hdr->evt != event)
882 goto failed;
883 return skb;
884 }
885
75e84b7c
JH
886 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
887 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
888 goto failed;
889 }
890
891 if (skb->len < sizeof(*ev)) {
892 BT_ERR("Too short cmd_complete event");
893 goto failed;
894 }
895
896 ev = (void *) skb->data;
897 skb_pull(skb, sizeof(*ev));
898
899 if (opcode == __le16_to_cpu(ev->opcode))
900 return skb;
901
902 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
903 __le16_to_cpu(ev->opcode));
904
905failed:
906 kfree_skb(skb);
907 return ERR_PTR(-ENODATA);
908}
909
7b1abbbe 910struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
07dc93dd 911 const void *param, u8 event, u32 timeout)
75e84b7c
JH
912{
913 DECLARE_WAITQUEUE(wait, current);
914 struct hci_request req;
915 int err = 0;
916
917 BT_DBG("%s", hdev->name);
918
919 hci_req_init(&req, hdev);
920
7b1abbbe 921 hci_req_add_ev(&req, opcode, plen, param, event);
75e84b7c
JH
922
923 hdev->req_status = HCI_REQ_PEND;
924
925 err = hci_req_run(&req, hci_req_sync_complete);
926 if (err < 0)
927 return ERR_PTR(err);
928
929 add_wait_queue(&hdev->req_wait_q, &wait);
930 set_current_state(TASK_INTERRUPTIBLE);
931
932 schedule_timeout(timeout);
933
934 remove_wait_queue(&hdev->req_wait_q, &wait);
935
936 if (signal_pending(current))
937 return ERR_PTR(-EINTR);
938
939 switch (hdev->req_status) {
940 case HCI_REQ_DONE:
941 err = -bt_to_errno(hdev->req_result);
942 break;
943
944 case HCI_REQ_CANCELED:
945 err = -hdev->req_result;
946 break;
947
948 default:
949 err = -ETIMEDOUT;
950 break;
951 }
952
953 hdev->req_status = hdev->req_result = 0;
954
955 BT_DBG("%s end: err %d", hdev->name, err);
956
957 if (err < 0)
958 return ERR_PTR(err);
959
7b1abbbe
JH
960 return hci_get_cmd_complete(hdev, opcode, event);
961}
962EXPORT_SYMBOL(__hci_cmd_sync_ev);
963
/* Convenience wrapper around __hci_cmd_sync_ev() that waits for the
 * Command Complete event (event == 0). Caller owns the returned skb.
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
970
1da177e4 971/* Execute request and wait for completion. */
01178cd4 972static int __hci_req_sync(struct hci_dev *hdev,
42c6b129
JH
973 void (*func)(struct hci_request *req,
974 unsigned long opt),
01178cd4 975 unsigned long opt, __u32 timeout)
1da177e4 976{
42c6b129 977 struct hci_request req;
1da177e4
LT
978 DECLARE_WAITQUEUE(wait, current);
979 int err = 0;
980
981 BT_DBG("%s start", hdev->name);
982
42c6b129
JH
983 hci_req_init(&req, hdev);
984
1da177e4
LT
985 hdev->req_status = HCI_REQ_PEND;
986
42c6b129 987 func(&req, opt);
53cce22d 988
42c6b129
JH
989 err = hci_req_run(&req, hci_req_sync_complete);
990 if (err < 0) {
53cce22d 991 hdev->req_status = 0;
920c8300
AG
992
993 /* ENODATA means the HCI request command queue is empty.
994 * This can happen when a request with conditionals doesn't
995 * trigger any commands to be sent. This is normal behavior
996 * and should not trigger an error return.
42c6b129 997 */
920c8300
AG
998 if (err == -ENODATA)
999 return 0;
1000
1001 return err;
53cce22d
JH
1002 }
1003
bc4445c7
AG
1004 add_wait_queue(&hdev->req_wait_q, &wait);
1005 set_current_state(TASK_INTERRUPTIBLE);
1006
1da177e4
LT
1007 schedule_timeout(timeout);
1008
1009 remove_wait_queue(&hdev->req_wait_q, &wait);
1010
1011 if (signal_pending(current))
1012 return -EINTR;
1013
1014 switch (hdev->req_status) {
1015 case HCI_REQ_DONE:
e175072f 1016 err = -bt_to_errno(hdev->req_result);
1da177e4
LT
1017 break;
1018
1019 case HCI_REQ_CANCELED:
1020 err = -hdev->req_result;
1021 break;
1022
1023 default:
1024 err = -ETIMEDOUT;
1025 break;
3ff50b79 1026 }
1da177e4 1027
a5040efa 1028 hdev->req_status = hdev->req_result = 0;
1da177e4
LT
1029
1030 BT_DBG("%s end: err %d", hdev->name, err);
1031
1032 return err;
1033}
1034
01178cd4 1035static int hci_req_sync(struct hci_dev *hdev,
42c6b129
JH
1036 void (*req)(struct hci_request *req,
1037 unsigned long opt),
01178cd4 1038 unsigned long opt, __u32 timeout)
1da177e4
LT
1039{
1040 int ret;
1041
7c6a329e
MH
1042 if (!test_bit(HCI_UP, &hdev->flags))
1043 return -ENETDOWN;
1044
1da177e4
LT
1045 /* Serialize all requests */
1046 hci_req_lock(hdev);
01178cd4 1047 ret = __hci_req_sync(hdev, req, opt, timeout);
1da177e4
LT
1048 hci_req_unlock(hdev);
1049
1050 return ret;
1051}
1052
42c6b129 1053static void hci_reset_req(struct hci_request *req, unsigned long opt)
1da177e4 1054{
42c6b129 1055 BT_DBG("%s %ld", req->hdev->name, opt);
1da177e4
LT
1056
1057 /* Reset device */
42c6b129
JH
1058 set_bit(HCI_RESET, &req->hdev->flags);
1059 hci_req_add(req, HCI_OP_RESET, 0, NULL);
1da177e4
LT
1060}
1061
42c6b129 1062static void bredr_init(struct hci_request *req)
1da177e4 1063{
42c6b129 1064 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
2455a3ea 1065
1da177e4 1066 /* Read Local Supported Features */
42c6b129 1067 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1da177e4 1068
1143e5a6 1069 /* Read Local Version */
42c6b129 1070 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
2177bab5
JH
1071
1072 /* Read BD Address */
42c6b129 1073 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1da177e4
LT
1074}
1075
42c6b129 1076static void amp_init(struct hci_request *req)
e61ef499 1077{
42c6b129 1078 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
2455a3ea 1079
e61ef499 1080 /* Read Local Version */
42c6b129 1081 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
6bcbc489 1082
f6996cfe
MH
1083 /* Read Local Supported Commands */
1084 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1085
1086 /* Read Local Supported Features */
1087 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1088
6bcbc489 1089 /* Read Local AMP Info */
42c6b129 1090 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
e71dfaba
AE
1091
1092 /* Read Data Blk size */
42c6b129 1093 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
7528ca1c 1094
f38ba941
MH
1095 /* Read Flow Control Mode */
1096 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1097
7528ca1c
MH
1098 /* Read Location Data */
1099 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
e61ef499
AE
1100}
1101
42c6b129 1102static void hci_init1_req(struct hci_request *req, unsigned long opt)
e61ef499 1103{
42c6b129 1104 struct hci_dev *hdev = req->hdev;
e61ef499
AE
1105
1106 BT_DBG("%s %ld", hdev->name, opt);
1107
11778716
AE
1108 /* Reset */
1109 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
42c6b129 1110 hci_reset_req(req, 0);
11778716 1111
e61ef499
AE
1112 switch (hdev->dev_type) {
1113 case HCI_BREDR:
42c6b129 1114 bredr_init(req);
e61ef499
AE
1115 break;
1116
1117 case HCI_AMP:
42c6b129 1118 amp_init(req);
e61ef499
AE
1119 break;
1120
1121 default:
1122 BT_ERR("Unknown device type %d", hdev->dev_type);
1123 break;
1124 }
e61ef499
AE
1125}
1126
42c6b129 1127static void bredr_setup(struct hci_request *req)
2177bab5 1128{
4ca048e3
MH
1129 struct hci_dev *hdev = req->hdev;
1130
2177bab5
JH
1131 __le16 param;
1132 __u8 flt_type;
1133
1134 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
42c6b129 1135 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
2177bab5
JH
1136
1137 /* Read Class of Device */
42c6b129 1138 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
2177bab5
JH
1139
1140 /* Read Local Name */
42c6b129 1141 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
2177bab5
JH
1142
1143 /* Read Voice Setting */
42c6b129 1144 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
2177bab5 1145
b4cb9fb2
MH
1146 /* Read Number of Supported IAC */
1147 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1148
4b836f39
MH
1149 /* Read Current IAC LAP */
1150 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1151
2177bab5
JH
1152 /* Clear Event Filters */
1153 flt_type = HCI_FLT_CLEAR_ALL;
42c6b129 1154 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
2177bab5
JH
1155
1156 /* Connection accept timeout ~20 secs */
1157 param = __constant_cpu_to_le16(0x7d00);
42c6b129 1158 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
2177bab5 1159
4ca048e3
MH
1160 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1161 * but it does not support page scan related HCI commands.
1162 */
1163 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
f332ec66
JH
1164 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1165 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1166 }
2177bab5
JH
1167}
1168
/* Queue the LE specific setup commands and, for single-mode LE
 * controllers, mark LE as implicitly enabled.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
1192
1193static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1194{
1195 if (lmp_ext_inq_capable(hdev))
1196 return 0x02;
1197
1198 if (lmp_inq_rssi_capable(hdev))
1199 return 0x01;
1200
1201 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1202 hdev->lmp_subver == 0x0757)
1203 return 0x01;
1204
1205 if (hdev->manufacturer == 15) {
1206 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1207 return 0x01;
1208 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1209 return 0x01;
1210 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1211 return 0x01;
1212 }
1213
1214 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1215 hdev->lmp_subver == 0x1805)
1216 return 0x01;
1217
1218 return 0x00;
1219}
1220
42c6b129 1221static void hci_setup_inquiry_mode(struct hci_request *req)
2177bab5
JH
1222{
1223 u8 mode;
1224
42c6b129 1225 mode = hci_get_inquiry_mode(req->hdev);
2177bab5 1226
42c6b129 1227 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
2177bab5
JH
1228}
1229
/* Build and send the HCI event mask (and the LE event mask when LE is
 * supported), enabling only events the controller's feature set can
 * actually generate.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		/* Enable the five lowest LE meta events */
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
1310
/* Stage two of controller initialization: transport capability setup
 * (BR/EDR and/or LE), event masks, SSP/EIR configuration and extended
 * feature reads.
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			/* SSP not enabled by the host: clear any stale EIR
			 * data on the controller.
			 */
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
1374
42c6b129 1375static void hci_setup_link_policy(struct hci_request *req)
2177bab5 1376{
42c6b129 1377 struct hci_dev *hdev = req->hdev;
2177bab5
JH
1378 struct hci_cp_write_def_link_policy cp;
1379 u16 link_policy = 0;
1380
1381 if (lmp_rswitch_capable(hdev))
1382 link_policy |= HCI_LP_RSWITCH;
1383 if (lmp_hold_capable(hdev))
1384 link_policy |= HCI_LP_HOLD;
1385 if (lmp_sniff_capable(hdev))
1386 link_policy |= HCI_LP_SNIFF;
1387 if (lmp_park_capable(hdev))
1388 link_policy |= HCI_LP_PARK;
1389
1390 cp.policy = cpu_to_le16(link_policy);
42c6b129 1391 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
2177bab5
JH
1392}
1393
/* Synchronize the controller's LE host support setting with the
 * HCI_LE_ENABLED flag; only sends the command when the value actually
 * needs to change.
 */
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	/* Avoid a redundant write when the host LE state already matches */
	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
1414
/* Build and send page 2 of the event mask, covering Connectionless
 * Slave Broadcast (master and slave roles) and Authenticated Payload
 * Timeout events, depending on controller capabilities.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
1446
/* Stage three of controller initialization: stored link key cleanup,
 * default link policy, LE own address type selection and extended
 * feature page reads.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		/* If the controller has a public BD_ADDR, then by default
		 * use that one. If this is a LE only controller without
		 * a public address, default to the random address.
		 *
		 * For debugging purposes it is possible to force
		 * controllers with a public address to use the
		 * random address instead.
		 */
		if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
		    !bacmp(&hdev->bdaddr, BDADDR_ANY))
			hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
		else
			hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
1505
/* Stage four of controller initialization: event mask page 2,
 * synchronization train parameters and Secure Connections support.
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
1527
/* Run the full synchronous initialization sequence for a controller:
 * stages 1-4 via __hci_req_sync(), then create the debugfs entries.
 * AMP controllers only go through stage 1. Debugfs entries are created
 * only during the initial HCI_SETUP phase so repeated power cycles do
 * not try to re-create them.
 *
 * Returns 0 on success or a negative error from the request sync calls.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	/* Generic entries, valid for every controller type */
	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
				    hdev, &ssp_debug_mode_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_file("random_address", 0444, hdev->debugfs,
				    hdev, &random_address_fops);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
				    hdev, &adv_channel_map_fops);
		debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
				    &lowpan_debugfs_fops);
	}

	return 0;
}
1644
42c6b129 1645static void hci_scan_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1646{
1647 __u8 scan = opt;
1648
42c6b129 1649 BT_DBG("%s %x", req->hdev->name, scan);
1da177e4
LT
1650
1651 /* Inquiry and Page scans */
42c6b129 1652 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
1653}
1654
42c6b129 1655static void hci_auth_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1656{
1657 __u8 auth = opt;
1658
42c6b129 1659 BT_DBG("%s %x", req->hdev->name, auth);
1da177e4
LT
1660
1661 /* Authentication */
42c6b129 1662 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
1663}
1664
42c6b129 1665static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1666{
1667 __u8 encrypt = opt;
1668
42c6b129 1669 BT_DBG("%s %x", req->hdev->name, encrypt);
1da177e4 1670
e4e8e37c 1671 /* Encryption */
42c6b129 1672 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
1673}
1674
42c6b129 1675static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
e4e8e37c
MH
1676{
1677 __le16 policy = cpu_to_le16(opt);
1678
42c6b129 1679 BT_DBG("%s %x", req->hdev->name, policy);
e4e8e37c
MH
1680
1681 /* Default link policy */
42c6b129 1682 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
e4e8e37c
MH
1683}
1684
/* Get HCI device by index.
 * Device is held on return (caller must drop with hci_dev_put()),
 * or NULL if no device with that index exists.
 */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	/* hci_dev_list is protected by hci_dev_list_lock */
	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
1da177e4
LT
1706
1707/* ---- Inquiry support ---- */
ff9ef578 1708
30dc78e1
JH
1709bool hci_discovery_active(struct hci_dev *hdev)
1710{
1711 struct discovery_state *discov = &hdev->discovery;
1712
6fbe195d 1713 switch (discov->state) {
343f935b 1714 case DISCOVERY_FINDING:
6fbe195d 1715 case DISCOVERY_RESOLVING:
30dc78e1
JH
1716 return true;
1717
6fbe195d
AG
1718 default:
1719 return false;
1720 }
30dc78e1
JH
1721}
1722
/* Transition the discovery state machine and emit the corresponding
 * mgmt "discovering" events on the STOPPED and FINDING transitions.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	/* No-op if the state is unchanged */
	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* Only report "stopped" if discovery had actually started;
		 * a STARTING -> STOPPED transition means it never began.
		 */
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
1748
/* Free every entry in the inquiry cache and reset the unknown/resolve
 * lists. Caller is expected to hold the hdev lock.
 */
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	/* Entries on these lists were freed via the "all" list above */
	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}
1762
/* Look up an inquiry cache entry by Bluetooth address across all
 * cached entries. Returns the entry or NULL if not cached.
 */
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
1778
/* Look up an entry whose remote name is still unknown, by Bluetooth
 * address. Returns the entry or NULL if not on the "unknown" list.
 */
struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
1794
/* Look up an entry on the name-resolve list. With BDADDR_ANY, the
 * first entry whose name_state matches @state is returned; otherwise
 * the entry matching @bdaddr. Returns NULL when nothing matches.
 */
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
1813
/* Re-insert @ie into the resolve list keeping it sorted: entries with
 * pending name requests stay in front, the rest are ordered by
 * descending signal strength (smaller |RSSI| first).
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	/* Insert after the last entry that should precede ie */
	list_add(&ie->list, pos);
}
1832
/* Add a new inquiry result to the cache or refresh an existing entry.
 *
 * @name_known: whether the remote name came with this result.
 * @ssp: out parameter (may be NULL); set to the result's SSP mode, or
 *       forced true if the cached entry already indicated SSP.
 *
 * Returns true when the entry's name is known (no name resolution
 * needed), false when the name is still unknown or allocation failed.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* A fresh inquiry result invalidates any stored OOB data */
	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* RSSI changed while a name request is pending: re-sort
		 * the resolve list so stronger devices are resolved first.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Promote the entry once the name becomes known, unless a name
	 * request for it is already in flight.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
1890
/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info. Returns the number of entries copied. Must not
 * sleep; caller holds the hdev lock.
 */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
1918
/* Request handler: start an inquiry using the parameters passed via
 * @opt (a pointer to struct hci_inquiry_req). Does nothing if an
 * inquiry is already in progress.
 */
static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
1936
/* wait_on_bit() action: yield the CPU and report whether a signal is
 * pending (non-zero aborts the wait with -EINTR semantics).
 */
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}
1942
1da177e4
LT
1943int hci_inquiry(void __user *arg)
1944{
1945 __u8 __user *ptr = arg;
1946 struct hci_inquiry_req ir;
1947 struct hci_dev *hdev;
1948 int err = 0, do_inquiry = 0, max_rsp;
1949 long timeo;
1950 __u8 *buf;
1951
1952 if (copy_from_user(&ir, ptr, sizeof(ir)))
1953 return -EFAULT;
1954
5a08ecce
AE
1955 hdev = hci_dev_get(ir.dev_id);
1956 if (!hdev)
1da177e4
LT
1957 return -ENODEV;
1958
0736cfa8
MH
1959 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1960 err = -EBUSY;
1961 goto done;
1962 }
1963
5b69bef5
MH
1964 if (hdev->dev_type != HCI_BREDR) {
1965 err = -EOPNOTSUPP;
1966 goto done;
1967 }
1968
56f87901
JH
1969 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1970 err = -EOPNOTSUPP;
1971 goto done;
1972 }
1973
09fd0de5 1974 hci_dev_lock(hdev);
8e87d142 1975 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 1976 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 1977 hci_inquiry_cache_flush(hdev);
1da177e4
LT
1978 do_inquiry = 1;
1979 }
09fd0de5 1980 hci_dev_unlock(hdev);
1da177e4 1981
04837f64 1982 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
1983
1984 if (do_inquiry) {
01178cd4
JH
1985 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1986 timeo);
70f23020
AE
1987 if (err < 0)
1988 goto done;
3e13fa1e
AG
1989
1990 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1991 * cleared). If it is interrupted by a signal, return -EINTR.
1992 */
1993 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1994 TASK_INTERRUPTIBLE))
1995 return -EINTR;
70f23020 1996 }
1da177e4 1997
8fc9ced3
GP
1998 /* for unlimited number of responses we will use buffer with
1999 * 255 entries
2000 */
1da177e4
LT
2001 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2002
2003 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2004 * copy it to the user space.
2005 */
01df8c31 2006 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 2007 if (!buf) {
1da177e4
LT
2008 err = -ENOMEM;
2009 goto done;
2010 }
2011
09fd0de5 2012 hci_dev_lock(hdev);
1da177e4 2013 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 2014 hci_dev_unlock(hdev);
1da177e4
LT
2015
2016 BT_DBG("num_rsp %d", ir.num_rsp);
2017
2018 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2019 ptr += sizeof(ir);
2020 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 2021 ir.num_rsp))
1da177e4 2022 err = -EFAULT;
8e87d142 2023 } else
1da177e4
LT
2024 err = -EFAULT;
2025
2026 kfree(buf);
2027
2028done:
2029 hci_dev_put(hdev);
2030 return err;
2031}
2032
/* Power on a controller: run the driver open callback, the optional
 * driver setup hook (during HCI_SETUP only) and the HCI init sequence,
 * then either mark the device up or tear everything down on failure.
 *
 * Returns 0 on success or a negative errno (-ENODEV, -ERFKILL,
 * -EADDRNOTAVAIL, -EALREADY, -EIO or an init error).
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	/* Driver specific setup runs only once, during initial setup */
	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		/* Skip HCI init for raw devices and user channel access */
		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
2140
cbed0ca1
JH
2141/* ---- HCI ioctl helpers ---- */
2142
2143int hci_dev_open(__u16 dev)
2144{
2145 struct hci_dev *hdev;
2146 int err;
2147
2148 hdev = hci_dev_get(dev);
2149 if (!hdev)
2150 return -ENODEV;
2151
e1d08f40
JH
2152 /* We need to ensure that no other power on/off work is pending
2153 * before proceeding to call hci_dev_do_open. This is
2154 * particularly important if the setup procedure has not yet
2155 * completed.
2156 */
2157 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2158 cancel_delayed_work(&hdev->power_off);
2159
a5c8f270
MH
2160 /* After this call it is guaranteed that the setup procedure
2161 * has finished. This means that error conditions like RFKILL
2162 * or no valid public or static random address apply.
2163 */
e1d08f40
JH
2164 flush_workqueue(hdev->req_workqueue);
2165
cbed0ca1
JH
2166 err = hci_dev_do_open(hdev);
2167
2168 hci_dev_put(hdev);
2169
2170 return err;
2171}
2172
1da177e4
LT
/* Power down a controller: flush pending work, stop timers, drop all
 * queued traffic, optionally send HCI_Reset and finally invoke the
 * driver's close callback. Counterpart of hci_dev_do_open().
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	/* A pending delayed auto power-off must not fire after this */
	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Already down: just make sure the command timer is stopped */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* Cancel a running discoverable timeout and clear the flags */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	/* Synchronous cancel: these works must not run after close */
	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->rpa_expired);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* Drop last received event */
	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	/* Only notify mgmt when this was not an automatic power-off */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	/* Forget state that is only valid while the device is powered */
	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
2272
2273int hci_dev_close(__u16 dev)
2274{
2275 struct hci_dev *hdev;
2276 int err;
2277
70f23020
AE
2278 hdev = hci_dev_get(dev);
2279 if (!hdev)
1da177e4 2280 return -ENODEV;
8ee56540 2281
0736cfa8
MH
2282 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2283 err = -EBUSY;
2284 goto done;
2285 }
2286
8ee56540
MH
2287 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2288 cancel_delayed_work(&hdev->power_off);
2289
1da177e4 2290 err = hci_dev_do_close(hdev);
8ee56540 2291
0736cfa8 2292done:
1da177e4
LT
2293 hci_dev_put(hdev);
2294 return err;
2295}
2296
/* HCIDEVRESET ioctl: flush queues and caches of a running device and,
 * unless it is a raw device, issue an HCI_Reset to the controller.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	/* A user channel owns the device exclusively */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset flow-control counters to their idle state */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
2341
2342int hci_dev_reset_stat(__u16 dev)
2343{
2344 struct hci_dev *hdev;
2345 int ret = 0;
2346
70f23020
AE
2347 hdev = hci_dev_get(dev);
2348 if (!hdev)
1da177e4
LT
2349 return -ENODEV;
2350
0736cfa8
MH
2351 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2352 ret = -EBUSY;
2353 goto done;
2354 }
2355
1da177e4
LT
2356 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2357
0736cfa8 2358done:
1da177e4 2359 hci_dev_put(hdev);
1da177e4
LT
2360 return ret;
2361}
2362
/* Handle the legacy per-device HCI ioctls (HCISETAUTH etc.). Only
 * valid for BR/EDR-enabled controllers not bound to a user channel.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	/* A user channel owns the device exclusively */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs MTU and packet count as two 16-bit
		 * halves of the 32-bit value (layout is the historic
		 * native-endian ABI of this ioctl).
		 */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		/* Same packed layout as HCISETACLMTU */
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
2453
/* HCIGETDEVLIST ioctl: copy id/flags of registered controllers to
 * userspace, at most dev_num entries.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request at roughly two pages worth of entries */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Legacy userspace is taking over: stop a pending
		 * automatic power-off.
		 */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Non-mgmt userspace expects devices to be pairable */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy back as many entries as were actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
2500
2501int hci_get_dev_info(void __user *arg)
2502{
2503 struct hci_dev *hdev;
2504 struct hci_dev_info di;
2505 int err = 0;
2506
2507 if (copy_from_user(&di, arg, sizeof(di)))
2508 return -EFAULT;
2509
70f23020
AE
2510 hdev = hci_dev_get(di.dev_id);
2511 if (!hdev)
1da177e4
LT
2512 return -ENODEV;
2513
a8b2d5c2 2514 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
3243553f 2515 cancel_delayed_work_sync(&hdev->power_off);
ab81cbf9 2516
a8b2d5c2
JH
2517 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2518 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 2519
1da177e4
LT
2520 strcpy(di.name, hdev->name);
2521 di.bdaddr = hdev->bdaddr;
60f2a3ed 2522 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
1da177e4
LT
2523 di.flags = hdev->flags;
2524 di.pkt_type = hdev->pkt_type;
572c7f84
JH
2525 if (lmp_bredr_capable(hdev)) {
2526 di.acl_mtu = hdev->acl_mtu;
2527 di.acl_pkts = hdev->acl_pkts;
2528 di.sco_mtu = hdev->sco_mtu;
2529 di.sco_pkts = hdev->sco_pkts;
2530 } else {
2531 di.acl_mtu = hdev->le_mtu;
2532 di.acl_pkts = hdev->le_pkts;
2533 di.sco_mtu = 0;
2534 di.sco_pkts = 0;
2535 }
1da177e4
LT
2536 di.link_policy = hdev->link_policy;
2537 di.link_mode = hdev->link_mode;
2538
2539 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2540 memcpy(&di.features, &hdev->features, sizeof(di.features));
2541
2542 if (copy_to_user(arg, &di, sizeof(di)))
2543 err = -EFAULT;
2544
2545 hci_dev_put(hdev);
2546
2547 return err;
2548}
2549
2550/* ---- Interface to HCI drivers ---- */
2551
611b30f7
MH
2552static int hci_rfkill_set_block(void *data, bool blocked)
2553{
2554 struct hci_dev *hdev = data;
2555
2556 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2557
0736cfa8
MH
2558 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2559 return -EBUSY;
2560
5e130367
JH
2561 if (blocked) {
2562 set_bit(HCI_RFKILLED, &hdev->dev_flags);
bf543036
JH
2563 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2564 hci_dev_do_close(hdev);
5e130367
JH
2565 } else {
2566 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
1025c04c 2567 }
611b30f7
MH
2568
2569 return 0;
2570}
2571
/* rfkill callbacks: only the block/unblock transition is handled */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
2575
ab81cbf9
JH
/* Deferred power-on work, queued on hdev->req_workqueue. Also runs the
 * initial power-on of the setup phase.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 * For BR/EDR controllers this includes having neither a public
	 * address nor a configured static random address.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		/* Power back off automatically unless userspace claims
		 * the device within the timeout.
		 */
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	/* The first successful power-on ends the setup phase */
	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
2607
2608static void hci_power_off(struct work_struct *work)
2609{
3243553f 2610 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 2611 power_off.work);
ab81cbf9
JH
2612
2613 BT_DBG("%s", hdev->name);
2614
8ee56540 2615 hci_dev_do_close(hdev);
ab81cbf9
JH
2616}
2617
16ab91ab
JH
2618static void hci_discov_off(struct work_struct *work)
2619{
2620 struct hci_dev *hdev;
16ab91ab
JH
2621
2622 hdev = container_of(work, struct hci_dev, discov_off.work);
2623
2624 BT_DBG("%s", hdev->name);
2625
d1967ff8 2626 mgmt_discoverable_timeout(hdev);
16ab91ab
JH
2627}
2628
35f7498a 2629void hci_uuids_clear(struct hci_dev *hdev)
2aeb9a1a 2630{
4821002c 2631 struct bt_uuid *uuid, *tmp;
2aeb9a1a 2632
4821002c
JH
2633 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2634 list_del(&uuid->list);
2aeb9a1a
JH
2635 kfree(uuid);
2636 }
2aeb9a1a
JH
2637}
2638
35f7498a 2639void hci_link_keys_clear(struct hci_dev *hdev)
55ed8ca1
JH
2640{
2641 struct list_head *p, *n;
2642
2643 list_for_each_safe(p, n, &hdev->link_keys) {
2644 struct link_key *key;
2645
2646 key = list_entry(p, struct link_key, list);
2647
2648 list_del(p);
2649 kfree(key);
2650 }
55ed8ca1
JH
2651}
2652
35f7498a 2653void hci_smp_ltks_clear(struct hci_dev *hdev)
b899efaf
VCG
2654{
2655 struct smp_ltk *k, *tmp;
2656
2657 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2658 list_del(&k->list);
2659 kfree(k);
2660 }
b899efaf
VCG
2661}
2662
970c4e46
JH
2663void hci_smp_irks_clear(struct hci_dev *hdev)
2664{
2665 struct smp_irk *k, *tmp;
2666
2667 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2668 list_del(&k->list);
2669 kfree(k);
2670 }
2671}
2672
55ed8ca1
JH
2673struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2674{
8035ded4 2675 struct link_key *k;
55ed8ca1 2676
8035ded4 2677 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
2678 if (bacmp(bdaddr, &k->bdaddr) == 0)
2679 return k;
55ed8ca1
JH
2680
2681 return NULL;
2682}
2683
745c0ce3 2684static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 2685 u8 key_type, u8 old_key_type)
d25e28ab
JH
2686{
2687 /* Legacy key */
2688 if (key_type < 0x03)
745c0ce3 2689 return true;
d25e28ab
JH
2690
2691 /* Debug keys are insecure so don't store them persistently */
2692 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 2693 return false;
d25e28ab
JH
2694
2695 /* Changed combination key and there's no previous one */
2696 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 2697 return false;
d25e28ab
JH
2698
2699 /* Security mode 3 case */
2700 if (!conn)
745c0ce3 2701 return true;
d25e28ab
JH
2702
2703 /* Neither local nor remote side had no-bonding as requirement */
2704 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 2705 return true;
d25e28ab
JH
2706
2707 /* Local side had dedicated bonding as requirement */
2708 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 2709 return true;
d25e28ab
JH
2710
2711 /* Remote side had dedicated bonding as requirement */
2712 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 2713 return true;
d25e28ab
JH
2714
2715 /* If none of the above criteria match, then don't store the key
2716 * persistently */
745c0ce3 2717 return false;
d25e28ab
JH
2718}
2719
98a0b845
JH
2720static bool ltk_type_master(u8 type)
2721{
2722 if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
2723 return true;
2724
2725 return false;
2726}
2727
2728struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8],
2729 bool master)
75d262c2 2730{
c9839a11 2731 struct smp_ltk *k;
75d262c2 2732
c9839a11
VCG
2733 list_for_each_entry(k, &hdev->long_term_keys, list) {
2734 if (k->ediv != ediv ||
a8c5fb1a 2735 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
2736 continue;
2737
98a0b845
JH
2738 if (ltk_type_master(k->type) != master)
2739 continue;
2740
c9839a11 2741 return k;
75d262c2
VCG
2742 }
2743
2744 return NULL;
2745}
75d262c2 2746
c9839a11 2747struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
98a0b845 2748 u8 addr_type, bool master)
75d262c2 2749{
c9839a11 2750 struct smp_ltk *k;
75d262c2 2751
c9839a11
VCG
2752 list_for_each_entry(k, &hdev->long_term_keys, list)
2753 if (addr_type == k->bdaddr_type &&
98a0b845
JH
2754 bacmp(bdaddr, &k->bdaddr) == 0 &&
2755 ltk_type_master(k->type) == master)
75d262c2
VCG
2756 return k;
2757
2758 return NULL;
2759}
75d262c2 2760
970c4e46
JH
2761struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2762{
2763 struct smp_irk *irk;
2764
2765 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2766 if (!bacmp(&irk->rpa, rpa))
2767 return irk;
2768 }
2769
2770 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2771 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
2772 bacpy(&irk->rpa, rpa);
2773 return irk;
2774 }
2775 }
2776
2777 return NULL;
2778}
2779
2780struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2781 u8 addr_type)
2782{
2783 struct smp_irk *irk;
2784
6cfc9988
JH
2785 /* Identity Address must be public or static random */
2786 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2787 return NULL;
2788
970c4e46
JH
2789 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2790 if (addr_type == irk->addr_type &&
2791 bacmp(bdaddr, &irk->bdaddr) == 0)
2792 return irk;
2793 }
2794
2795 return NULL;
2796}
2797
d25e28ab 2798int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
04124681 2799 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
55ed8ca1
JH
2800{
2801 struct link_key *key, *old_key;
745c0ce3
VA
2802 u8 old_key_type;
2803 bool persistent;
55ed8ca1
JH
2804
2805 old_key = hci_find_link_key(hdev, bdaddr);
2806 if (old_key) {
2807 old_key_type = old_key->type;
2808 key = old_key;
2809 } else {
12adcf3a 2810 old_key_type = conn ? conn->key_type : 0xff;
0a14ab41 2811 key = kzalloc(sizeof(*key), GFP_KERNEL);
55ed8ca1
JH
2812 if (!key)
2813 return -ENOMEM;
2814 list_add(&key->list, &hdev->link_keys);
2815 }
2816
6ed93dc6 2817 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 2818
d25e28ab
JH
2819 /* Some buggy controller combinations generate a changed
2820 * combination key for legacy pairing even when there's no
2821 * previous key */
2822 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 2823 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 2824 type = HCI_LK_COMBINATION;
655fe6ec
JH
2825 if (conn)
2826 conn->key_type = type;
2827 }
d25e28ab 2828
55ed8ca1 2829 bacpy(&key->bdaddr, bdaddr);
9b3b4460 2830 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
2831 key->pin_len = pin_len;
2832
b6020ba0 2833 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 2834 key->type = old_key_type;
4748fed2
JH
2835 else
2836 key->type = type;
2837
4df378a1
JH
2838 if (!new_key)
2839 return 0;
2840
2841 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
2842
744cf19e 2843 mgmt_new_link_key(hdev, key, persistent);
4df378a1 2844
6ec5bcad
VA
2845 if (conn)
2846 conn->flush_key = !persistent;
55ed8ca1
JH
2847
2848 return 0;
2849}
2850
ca9142b8 2851struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
35d70271
JH
2852 u8 addr_type, u8 type, u8 authenticated,
2853 u8 tk[16], u8 enc_size, __le16 ediv, u8 rand[8])
75d262c2 2854{
c9839a11 2855 struct smp_ltk *key, *old_key;
98a0b845 2856 bool master = ltk_type_master(type);
75d262c2 2857
98a0b845 2858 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
c9839a11 2859 if (old_key)
75d262c2 2860 key = old_key;
c9839a11 2861 else {
0a14ab41 2862 key = kzalloc(sizeof(*key), GFP_KERNEL);
75d262c2 2863 if (!key)
ca9142b8 2864 return NULL;
c9839a11 2865 list_add(&key->list, &hdev->long_term_keys);
75d262c2
VCG
2866 }
2867
75d262c2 2868 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
2869 key->bdaddr_type = addr_type;
2870 memcpy(key->val, tk, sizeof(key->val));
2871 key->authenticated = authenticated;
2872 key->ediv = ediv;
2873 key->enc_size = enc_size;
2874 key->type = type;
2875 memcpy(key->rand, rand, sizeof(key->rand));
75d262c2 2876
ca9142b8 2877 return key;
75d262c2
VCG
2878}
2879
ca9142b8
JH
2880struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2881 u8 addr_type, u8 val[16], bdaddr_t *rpa)
970c4e46
JH
2882{
2883 struct smp_irk *irk;
2884
2885 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2886 if (!irk) {
2887 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2888 if (!irk)
ca9142b8 2889 return NULL;
970c4e46
JH
2890
2891 bacpy(&irk->bdaddr, bdaddr);
2892 irk->addr_type = addr_type;
2893
2894 list_add(&irk->list, &hdev->identity_resolving_keys);
2895 }
2896
2897 memcpy(irk->val, val, 16);
2898 bacpy(&irk->rpa, rpa);
2899
ca9142b8 2900 return irk;
970c4e46
JH
2901}
2902
55ed8ca1
JH
2903int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2904{
2905 struct link_key *key;
2906
2907 key = hci_find_link_key(hdev, bdaddr);
2908 if (!key)
2909 return -ENOENT;
2910
6ed93dc6 2911 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
2912
2913 list_del(&key->list);
2914 kfree(key);
2915
2916 return 0;
2917}
2918
e0b2b27e 2919int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
b899efaf
VCG
2920{
2921 struct smp_ltk *k, *tmp;
c51ffa0b 2922 int removed = 0;
b899efaf
VCG
2923
2924 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
e0b2b27e 2925 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
b899efaf
VCG
2926 continue;
2927
6ed93dc6 2928 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf
VCG
2929
2930 list_del(&k->list);
2931 kfree(k);
c51ffa0b 2932 removed++;
b899efaf
VCG
2933 }
2934
c51ffa0b 2935 return removed ? 0 : -ENOENT;
b899efaf
VCG
2936}
2937
a7ec7338
JH
2938void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2939{
2940 struct smp_irk *k, *tmp;
2941
668b7b19 2942 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
a7ec7338
JH
2943 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2944 continue;
2945
2946 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2947
2948 list_del(&k->list);
2949 kfree(k);
2950 }
2951}
2952
6bd32326 2953/* HCI command timer function */
bda4f23a 2954static void hci_cmd_timeout(unsigned long arg)
6bd32326
VT
2955{
2956 struct hci_dev *hdev = (void *) arg;
2957
bda4f23a
AE
2958 if (hdev->sent_cmd) {
2959 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2960 u16 opcode = __le16_to_cpu(sent->opcode);
2961
2962 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2963 } else {
2964 BT_ERR("%s command tx timeout", hdev->name);
2965 }
2966
6bd32326 2967 atomic_set(&hdev->cmd_cnt, 1);
c347b765 2968 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
2969}
2970
2763eda6 2971struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 2972 bdaddr_t *bdaddr)
2763eda6
SJ
2973{
2974 struct oob_data *data;
2975
2976 list_for_each_entry(data, &hdev->remote_oob_data, list)
2977 if (bacmp(bdaddr, &data->bdaddr) == 0)
2978 return data;
2979
2980 return NULL;
2981}
2982
2983int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2984{
2985 struct oob_data *data;
2986
2987 data = hci_find_remote_oob_data(hdev, bdaddr);
2988 if (!data)
2989 return -ENOENT;
2990
6ed93dc6 2991 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
2992
2993 list_del(&data->list);
2994 kfree(data);
2995
2996 return 0;
2997}
2998
35f7498a 2999void hci_remote_oob_data_clear(struct hci_dev *hdev)
2763eda6
SJ
3000{
3001 struct oob_data *data, *n;
3002
3003 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3004 list_del(&data->list);
3005 kfree(data);
3006 }
2763eda6
SJ
3007}
3008
0798872e
MH
3009int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3010 u8 *hash, u8 *randomizer)
2763eda6
SJ
3011{
3012 struct oob_data *data;
3013
3014 data = hci_find_remote_oob_data(hdev, bdaddr);
2763eda6 3015 if (!data) {
0a14ab41 3016 data = kmalloc(sizeof(*data), GFP_KERNEL);
2763eda6
SJ
3017 if (!data)
3018 return -ENOMEM;
3019
3020 bacpy(&data->bdaddr, bdaddr);
3021 list_add(&data->list, &hdev->remote_oob_data);
3022 }
3023
519ca9d0
MH
3024 memcpy(data->hash192, hash, sizeof(data->hash192));
3025 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
2763eda6 3026
0798872e
MH
3027 memset(data->hash256, 0, sizeof(data->hash256));
3028 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3029
3030 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3031
3032 return 0;
3033}
3034
3035int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3036 u8 *hash192, u8 *randomizer192,
3037 u8 *hash256, u8 *randomizer256)
3038{
3039 struct oob_data *data;
3040
3041 data = hci_find_remote_oob_data(hdev, bdaddr);
3042 if (!data) {
0a14ab41 3043 data = kmalloc(sizeof(*data), GFP_KERNEL);
0798872e
MH
3044 if (!data)
3045 return -ENOMEM;
3046
3047 bacpy(&data->bdaddr, bdaddr);
3048 list_add(&data->list, &hdev->remote_oob_data);
3049 }
3050
3051 memcpy(data->hash192, hash192, sizeof(data->hash192));
3052 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3053
3054 memcpy(data->hash256, hash256, sizeof(data->hash256));
3055 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3056
6ed93dc6 3057 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
3058
3059 return 0;
3060}
3061
b9ee0a78
MH
3062struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3063 bdaddr_t *bdaddr, u8 type)
b2a66aad 3064{
8035ded4 3065 struct bdaddr_list *b;
b2a66aad 3066
b9ee0a78
MH
3067 list_for_each_entry(b, &hdev->blacklist, list) {
3068 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 3069 return b;
b9ee0a78 3070 }
b2a66aad
AJ
3071
3072 return NULL;
3073}
3074
35f7498a 3075void hci_blacklist_clear(struct hci_dev *hdev)
b2a66aad
AJ
3076{
3077 struct list_head *p, *n;
3078
3079 list_for_each_safe(p, n, &hdev->blacklist) {
b9ee0a78 3080 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
b2a66aad
AJ
3081
3082 list_del(p);
3083 kfree(b);
3084 }
b2a66aad
AJ
3085}
3086
88c1fe4b 3087int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3088{
3089 struct bdaddr_list *entry;
b2a66aad 3090
b9ee0a78 3091 if (!bacmp(bdaddr, BDADDR_ANY))
b2a66aad
AJ
3092 return -EBADF;
3093
b9ee0a78 3094 if (hci_blacklist_lookup(hdev, bdaddr, type))
5e762444 3095 return -EEXIST;
b2a66aad
AJ
3096
3097 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
3098 if (!entry)
3099 return -ENOMEM;
b2a66aad
AJ
3100
3101 bacpy(&entry->bdaddr, bdaddr);
b9ee0a78 3102 entry->bdaddr_type = type;
b2a66aad
AJ
3103
3104 list_add(&entry->list, &hdev->blacklist);
3105
88c1fe4b 3106 return mgmt_device_blocked(hdev, bdaddr, type);
b2a66aad
AJ
3107}
3108
88c1fe4b 3109int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3110{
3111 struct bdaddr_list *entry;
b2a66aad 3112
35f7498a
JH
3113 if (!bacmp(bdaddr, BDADDR_ANY)) {
3114 hci_blacklist_clear(hdev);
3115 return 0;
3116 }
b2a66aad 3117
b9ee0a78 3118 entry = hci_blacklist_lookup(hdev, bdaddr, type);
1ec918ce 3119 if (!entry)
5e762444 3120 return -ENOENT;
b2a66aad
AJ
3121
3122 list_del(&entry->list);
3123 kfree(entry);
3124
88c1fe4b 3125 return mgmt_device_unblocked(hdev, bdaddr, type);
b2a66aad
AJ
3126}
3127
15819a70
AG
3128/* This function requires the caller holds hdev->lock */
3129struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3130 bdaddr_t *addr, u8 addr_type)
3131{
3132 struct hci_conn_params *params;
3133
3134 list_for_each_entry(params, &hdev->le_conn_params, list) {
3135 if (bacmp(&params->addr, addr) == 0 &&
3136 params->addr_type == addr_type) {
3137 return params;
3138 }
3139 }
3140
3141 return NULL;
3142}
3143
3144/* This function requires the caller holds hdev->lock */
3145void hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3146 u16 conn_min_interval, u16 conn_max_interval)
3147{
3148 struct hci_conn_params *params;
3149
3150 params = hci_conn_params_lookup(hdev, addr, addr_type);
3151 if (params) {
3152 params->conn_min_interval = conn_min_interval;
3153 params->conn_max_interval = conn_max_interval;
3154 return;
3155 }
3156
3157 params = kzalloc(sizeof(*params), GFP_KERNEL);
3158 if (!params) {
3159 BT_ERR("Out of memory");
3160 return;
3161 }
3162
3163 bacpy(&params->addr, addr);
3164 params->addr_type = addr_type;
3165 params->conn_min_interval = conn_min_interval;
3166 params->conn_max_interval = conn_max_interval;
3167
3168 list_add(&params->list, &hdev->le_conn_params);
3169
3170 BT_DBG("addr %pMR (type %u) conn_min_interval 0x%.4x "
3171 "conn_max_interval 0x%.4x", addr, addr_type, conn_min_interval,
3172 conn_max_interval);
3173}
3174
3175/* This function requires the caller holds hdev->lock */
3176void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3177{
3178 struct hci_conn_params *params;
3179
3180 params = hci_conn_params_lookup(hdev, addr, addr_type);
3181 if (!params)
3182 return;
3183
3184 list_del(&params->list);
3185 kfree(params);
3186
3187 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3188}
3189
3190/* This function requires the caller holds hdev->lock */
3191void hci_conn_params_clear(struct hci_dev *hdev)
3192{
3193 struct hci_conn_params *params, *tmp;
3194
3195 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3196 list_del(&params->list);
3197 kfree(params);
3198 }
3199
3200 BT_DBG("All LE connection parameters were removed");
3201}
3202
4c87eaab 3203static void inquiry_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3204{
4c87eaab
AG
3205 if (status) {
3206 BT_ERR("Failed to start inquiry: status %d", status);
7ba8b4be 3207
4c87eaab
AG
3208 hci_dev_lock(hdev);
3209 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3210 hci_dev_unlock(hdev);
3211 return;
3212 }
7ba8b4be
AG
3213}
3214
4c87eaab 3215static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3216{
4c87eaab
AG
3217 /* General inquiry access code (GIAC) */
3218 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3219 struct hci_request req;
3220 struct hci_cp_inquiry cp;
7ba8b4be
AG
3221 int err;
3222
4c87eaab
AG
3223 if (status) {
3224 BT_ERR("Failed to disable LE scanning: status %d", status);
3225 return;
3226 }
7ba8b4be 3227
4c87eaab
AG
3228 switch (hdev->discovery.type) {
3229 case DISCOV_TYPE_LE:
3230 hci_dev_lock(hdev);
3231 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3232 hci_dev_unlock(hdev);
3233 break;
7ba8b4be 3234
4c87eaab
AG
3235 case DISCOV_TYPE_INTERLEAVED:
3236 hci_req_init(&req, hdev);
7ba8b4be 3237
4c87eaab
AG
3238 memset(&cp, 0, sizeof(cp));
3239 memcpy(&cp.lap, lap, sizeof(cp.lap));
3240 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3241 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
7ba8b4be 3242
4c87eaab 3243 hci_dev_lock(hdev);
7dbfac1d 3244
4c87eaab 3245 hci_inquiry_cache_flush(hdev);
7dbfac1d 3246
4c87eaab
AG
3247 err = hci_req_run(&req, inquiry_complete);
3248 if (err) {
3249 BT_ERR("Inquiry request failed: err %d", err);
3250 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3251 }
7dbfac1d 3252
4c87eaab
AG
3253 hci_dev_unlock(hdev);
3254 break;
7dbfac1d 3255 }
7dbfac1d
AG
3256}
3257
/* Delayed work that turns off LE scanning (scheduled via
 * hdev->le_scan_disable).  The actual completion handling — ending or
 * continuing discovery — happens in le_scan_disable_work_complete().
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}
3278
/* Pick the own-address type for an LE request being built and, when
 * needed, append an HCI_OP_LE_SET_RANDOM_ADDR command to @req so the
 * controller's random address matches.  Returns 0 on success or a
 * negative error if a fresh RPA could not be generated.
 */
int hci_update_random_address(struct hci_request *req, u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * the current RPA has expired or there's something else than an
	 * RPA currently in use regenerate a new one.
	 */
	if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
		bdaddr_t rpa;
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		/* Current RPA still valid: nothing to write */
		if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
		    hci_bdaddr_is_rpa(&hdev->random_addr, ADDR_LE_DEV_RANDOM))
			return 0;

		err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, &rpa);

		/* Re-arm expiry so the RPA rotates after rpa_timeout secs */
		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 */
	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
3333
9be0dab7
DH
/* Alloc HCI device.
 *
 * Allocates a zeroed struct hci_dev and initializes every default,
 * lock, list, work item and queue the core relies on.  Must be
 * followed by hci_register_dev(); freed with hci_free_dev().
 * Returns NULL on allocation failure.
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Baseline packet types/link policy before the controller's real
	 * capabilities are read during init.
	 */
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* LE defaults (advertising channel map, scan and connection
	 * interval values in controller units).
	 */
	hdev->le_adv_channel_map = 0x07;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
3397EXPORT_SYMBOL(hci_alloc_dev);
3398
/* Free HCI device allocated by hci_alloc_dev().  The memory is not
 * released here directly; dropping the last device reference triggers
 * the device release callback which does the actual kfree.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
3405EXPORT_SYMBOL(hci_free_dev);
3406
1da177e4
LT
/* Register HCI device.
 *
 * Allocates an index, creates the work queues, crypto context, sysfs
 * device and rfkill hook, adds the device to the global list and kicks
 * off the power-on work.  Returns the assigned index (>= 0) on success
 * or a negative errno; on failure everything acquired so far is
 * unwound in reverse order via the goto ladder at the bottom.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* A driver must provide at least open and close callbacks */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Single-threaded, high-priority queues: HCI processing must be
	 * strictly ordered per device.
	 */
	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	/* AES-ECB context used for SMP resolvable-private-address work */
	hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
					       CRYPTO_ALG_ASYNC);
	if (IS_ERR(hdev->tfm_aes)) {
		BT_ERR("Unable to create crypto context");
		error = PTR_ERR(hdev->tfm_aes);
		hdev->tfm_aes = NULL;
		goto err_wqueue;
	}

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_tfm;

	/* rfkill registration failure is non-fatal: operate without it */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init.
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_tfm:
	crypto_free_blkcipher(hdev->tfm_aes);
err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
3514EXPORT_SYMBOL(hci_register_dev);
3515
/* Unregister HCI device.
 *
 * Reverse of hci_register_dev(): marks the device as going away, pulls
 * it off the global list, closes it, flushes pending work, tears down
 * mgmt/rfkill/crypto/sysfs/debugfs state, clears all stored keys and
 * parameters, drops the registration reference and finally releases
 * the index.  Teardown order mirrors setup order and matters.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Prevent new work from being scheduled against this device */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Save the index: hdev may be freed by hci_dev_put() below */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	/* Only announce removal to mgmt for fully set-up devices */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	if (hdev->tfm_aes)
		crypto_free_blkcipher(hdev->tfm_aes);

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Purge all persistent per-device state under the device lock */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_conn_params_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
3581
/* Suspend HCI device: notify registered listeners; always succeeds. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
3589
/* Resume HCI device: notify registered listeners; always succeeds. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
3597
/* Receive frame from HCI drivers.
 *
 * Takes ownership of @skb: it is either queued for the RX worker or
 * freed here when the device is neither up nor initializing (in which
 * case -ENXIO is returned).
 */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
3619
/* Reassemble a (possibly partial) HCI packet of @type from @count bytes
 * at @data, using the per-device slot hdev->reassembly[index] to carry
 * state across calls.  Once the header arrives, the expected payload
 * length is read from it; a complete frame is handed to
 * hci_recv_frame().  Returns the number of unconsumed input bytes, or a
 * negative errno (-EILSEQ for bad type/index, -ENOMEM on allocation or
 * oversized-payload failure).
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	/* Range check guarantees the switch below covers every type */
	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: allocate a max-size skb and
		 * expect the type-specific header first.
		 */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		/* Copy no more than what the current stage still expects */
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Header just completed? Read the payload length from it
		 * and make sure it fits in the allocated skb.
		 */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
3727
/* Feed driver-delivered fragments of a typed HCI packet into the
 * reassembler until the input is consumed (reassembly slot is
 * type - 1).  Returns the final leftover count (0 when everything was
 * consumed) or a negative errno from hci_reassembly().
 */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		/* Advance past whatever the reassembler consumed */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
3747
/* Dedicated reassembly slot for byte-stream transports (e.g. UART),
 * where the packet type arrives in-band as the first byte.
 */
#define STREAM_REASSEMBLY 0

/* Reassemble HCI packets from an untyped byte stream: the first byte
 * of each packet is its type indicator, the rest is handled by
 * hci_reassembly().  Returns leftover byte count or a negative errno.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
3782
1da177e4
LT
3783/* ---- Interface to upper protocols ---- */
3784
/* Register an upper-protocol callback structure on the global
 * hci_cb_list.  Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
3796
/* Remove a previously registered upper-protocol callback structure
 * from the global hci_cb_list.  Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
3808
/* Hand one outgoing frame to the driver, after timestamping it and
 * mirroring a copy to the monitor channel (and to raw sockets when a
 * promiscuous listener exists).  Send failures are only logged; the
 * driver owns the skb after hdev->send().
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}
3830
/* Initialize an HCI request builder: empty command queue, bound to
 * @hdev, with no accumulated error.
 */
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}
3837
/* Submit a built HCI request for execution.  @complete is attached to
 * the last command and invoked when the whole request finishes.
 * Returns 0 on success, the builder's accumulated error, or -ENODATA
 * for an empty request.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	/* Completion callback rides on the final command of the request */
	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	/* Splice atomically so the request's commands stay contiguous on
	 * the device command queue (cmd_q lock is taken from IRQ context).
	 */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
3869
1ca3a9d0 3870static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
07dc93dd 3871 u32 plen, const void *param)
1da177e4
LT
3872{
3873 int len = HCI_COMMAND_HDR_SIZE + plen;
3874 struct hci_command_hdr *hdr;
3875 struct sk_buff *skb;
3876
1da177e4 3877 skb = bt_skb_alloc(len, GFP_ATOMIC);
1ca3a9d0
JH
3878 if (!skb)
3879 return NULL;
1da177e4
LT
3880
3881 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 3882 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
3883 hdr->plen = plen;
3884
3885 if (plen)
3886 memcpy(skb_put(skb, plen), param, plen);
3887
3888 BT_DBG("skb len %d", skb->len);
3889
0d48d939 3890 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
c78ae283 3891
1ca3a9d0
JH
3892 return skb;
3893}
3894
/* Send HCI command.
 *
 * Builds a stand-alone command, queues it on the device command queue
 * and wakes the command worker.  Returns 0 or -ENOMEM.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
1da177e4 3919
/* Queue a command to an asynchronous HCI request.  @event, when
 * non-zero, names the HCI event expected to complete this command
 * instead of the usual Command Complete/Status.  Allocation failures
 * are latched into req->err and reported later by hci_req_run().
 */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	/* First command of a request marks the request boundary */
	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}
3950
/* Queue a command on a request, completed by the default Command
 * Complete/Status event (event == 0).
 */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
3956
/* Get data from the previously sent command.
 *
 * Returns a pointer to the parameter bytes of the last sent command if
 * its opcode matches @opcode, otherwise NULL.  The pointer aliases
 * hdev->sent_cmd and is only valid while that skb is held.
 */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
3974
/* Send ACL data */
/* Prepend an ACL header (packed handle+flags, little-endian data
 * length) to @skb.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	/* Capture payload length before skb_push() grows skb->len */
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
3987
/* Add ACL headers to @skb (and to every entry on its frag_list) and
 * place the result on @queue.  BR/EDR uses the connection handle, AMP
 * the logical-channel handle.  Continuation fragments are relabelled
 * ACL_CONT and queued atomically so they stay contiguous.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict skb accounting to the linear head; fragments are
	 * handled individually below.
	 */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Only the first fragment carries ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
4045
/* Queue ACL data on the channel's data queue and wake the TX worker. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
4056
4057/* Send SCO data */
0d861d8b 4058void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
4059{
4060 struct hci_dev *hdev = conn->hdev;
4061 struct hci_sco_hdr hdr;
4062
4063 BT_DBG("%s len %d", hdev->name, skb->len);
4064
aca3192c 4065 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
4066 hdr.dlen = skb->len;
4067
badff6d0
ACM
4068 skb_push(skb, HCI_SCO_HDR_SIZE);
4069 skb_reset_transport_header(skb);
9c70220b 4070 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 4071
0d48d939 4072 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 4073
1da177e4 4074 skb_queue_tail(&conn->data_q, skb);
3eff45ea 4075 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4076}
1da177e4
LT
4077
4078/* ---- HCI TX task (outgoing data) ---- */
4079
4080/* HCI Connection scheduler */
6039aa73
GP
4081static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4082 int *quote)
1da177e4
LT
4083{
4084 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4085 struct hci_conn *conn = NULL, *c;
abc5de8f 4086 unsigned int num = 0, min = ~0;
1da177e4 4087
8e87d142 4088 /* We don't have to lock device here. Connections are always
1da177e4 4089 * added and removed with TX task disabled. */
bf4c6325
GP
4090
4091 rcu_read_lock();
4092
4093 list_for_each_entry_rcu(c, &h->list, list) {
769be974 4094 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 4095 continue;
769be974
MH
4096
4097 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4098 continue;
4099
1da177e4
LT
4100 num++;
4101
4102 if (c->sent < min) {
4103 min = c->sent;
4104 conn = c;
4105 }
52087a79
LAD
4106
4107 if (hci_conn_num(hdev, type) == num)
4108 break;
1da177e4
LT
4109 }
4110
bf4c6325
GP
4111 rcu_read_unlock();
4112
1da177e4 4113 if (conn) {
6ed58ec5
VT
4114 int cnt, q;
4115
4116 switch (conn->type) {
4117 case ACL_LINK:
4118 cnt = hdev->acl_cnt;
4119 break;
4120 case SCO_LINK:
4121 case ESCO_LINK:
4122 cnt = hdev->sco_cnt;
4123 break;
4124 case LE_LINK:
4125 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4126 break;
4127 default:
4128 cnt = 0;
4129 BT_ERR("Unknown link type");
4130 }
4131
4132 q = cnt / num;
1da177e4
LT
4133 *quote = q ? q : 1;
4134 } else
4135 *quote = 0;
4136
4137 BT_DBG("conn %p quote %d", conn, *quote);
4138 return conn;
4139}
4140
/* TX-timeout handler: the controller stopped returning completed
 * packets for links of @type, so disconnect every connection of that
 * type that still has packets outstanding.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
4161
/* Channel-level scheduler: among all channels on connections of @type
 * that have queued data, select one whose head skb has the highest
 * priority, breaking ties by the connection with the fewest in-flight
 * packets.  Computes the per-round quota like hci_low_sent().  Returns
 * NULL when nothing is ready.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart fairness bookkeeping
			 * at this new priority level.
			 */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
4243
/* Anti-starvation pass run after a TX round: for every idle channel
 * (nothing sent this round) of @type, promote its head skb toward
 * HCI_PRIO_MAX - 1 so low-priority traffic eventually gets scheduled.
 * Channels that did send have their per-round counter reset instead.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel transmitted this round: just reset it */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
4293
/* Number of controller data blocks consumed by one ACL packet
 * (payload bytes rounded up to whole blocks).
 */
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
4299
/* Detect a stalled controller: if no TX buffers are free (@cnt == 0)
 * and nothing has been completed since the last ACL transmit for
 * longer than the timeout, tear down stalled ACL links.  Skipped in
 * raw mode where the core does not manage flow control.
 */
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
1da177e4 4310
/* Packet-based ACL scheduler: while controller ACL buffers remain,
 * repeatedly select the best channel via hci_chan_sent() and drain up
 * to its quota, stopping a channel early if a lower-priority skb
 * reaches the head.  Runs the starvation pass afterwards when anything
 * was sent.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Anything transmitted? Rebalance priorities for next round */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
4348
/* Send queued ACL data under block-based flow control (AMP-style): each
 * frame consumes __get_blocks() units of hdev->block_cnt rather than a
 * single packet slot.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	/* Snapshot the block budget to detect below whether anything was sent */
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	/* AMP controllers schedule AMP_LINK connections; otherwise ACL */
	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			/* Packet does not fit in the remaining blocks.
			 * NOTE(review): skb was already dequeued above;
			 * returning here neither sends nor frees it —
			 * looks like a leak/drop, confirm intent.
			 */
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	/* If any blocks were consumed, rebalance channel priorities */
	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
4402
/* Dispatch ACL scheduling to the packet- or block-based variant
 * according to the controller's flow control mode, after ruling out
 * link types the controller cannot carry.
 */
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
4425
/* Schedule SCO: drain queued SCO frames per connection while the
 * controller still reports free SCO buffers (hdev->sco_cnt), honouring
 * the per-connection quota from hci_low_sent().
 */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			/* Wrap the per-connection sent counter at its max */
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
4449
/* Schedule eSCO: identical strategy to hci_sched_sco() but for
 * ESCO_LINK connections, sharing the same hdev->sco_cnt buffer budget.
 */
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			/* Wrap the per-connection sent counter at its max */
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
4473
/* Schedule LE data.  Controllers that expose a dedicated LE buffer pool
 * (hdev->le_pkts != 0) consume hdev->le_cnt; otherwise LE traffic
 * borrows from the shared ACL budget (hdev->acl_cnt), and the leftover
 * is written back to the pool it came from at the end.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Pick the LE pool if present, else fall back to the ACL pool */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			/* Commit: actually remove the skb we just peeked */
			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Return the unused budget to whichever pool it was taken from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* If any frame went out, rebalance channel priorities */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
4524
/* TX worker (hdev->tx_work): runs the per-link-type schedulers, then
 * flushes any raw (unknown type) packets queued on hdev->raw_q.  When
 * the device is held by a user channel, scheduling is skipped and only
 * the raw queue is drained.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
4545
25985edc 4546/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
4547
/* ACL data packet: parse the handle/flags word, look up the owning
 * connection, and hand the payload to L2CAP.  Frames for unknown
 * handles are logged and dropped.
 */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit field packs both the handle and the PB/BC flags */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol; l2cap_recv_acldata consumes skb */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
4583
/* SCO data packet: parse the handle, look up the owning connection and
 * hand the payload to the SCO layer.  Frames for unknown handles are
 * logged and dropped.
 */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol; sco_recv_scodata consumes skb */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
4614
9238f36a
JH
4615static bool hci_req_is_complete(struct hci_dev *hdev)
4616{
4617 struct sk_buff *skb;
4618
4619 skb = skb_peek(&hdev->cmd_q);
4620 if (!skb)
4621 return true;
4622
4623 return bt_cb(skb)->req.start;
4624}
4625
/* Re-queue the last sent HCI command at the head of the command queue
 * and kick the command worker.  Used when a controller spontaneously
 * resets during init and the pending command would otherwise never
 * complete.  HCI_OP_RESET itself is never re-issued.
 */
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	/* Clone so hdev->sent_cmd keeps its own reference */
	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
4647
/* Handle completion of one command within a (possibly multi-command)
 * HCI request: decide whether the whole request is finished, locate the
 * request's completion callback, flush any remaining queued commands of
 * a failed request, and invoke the callback with the final status.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* Stop at the first command of the NEXT request and
		 * put it back so it is processed later */
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
4713
/* RX worker (hdev->rx_work): drain hdev->rx_q, mirroring each frame to
 * the monitor (and to sockets when in promiscuous mode), dropping
 * frames in raw/user-channel mode or data frames during init, and
 * finally dispatching by packet type to the event/ACL/SCO handlers.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* In raw or user-channel mode the stack does not process
		 * frames itself */
		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
4769
/* Command worker (hdev->cmd_work): if the controller has command credit
 * (hdev->cmd_cnt), send the next queued command, keeping a clone in
 * hdev->sent_cmd for completion matching, and (re)arm the command
 * timeout.  On clone failure the command is put back and the worker is
 * rescheduled.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the clone of the previously sent command */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			/* No command timeout while a reset is in flight */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: requeue and retry later */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
This page took 1.181074 seconds and 5 git commands to generate.