/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dev_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};

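/* Note: dut_mode above follows the pattern shared by the boolean debugfs
 * attributes in this file: reads return "Y\n" or "N\n", writes accept
 * anything strtobool() understands. A usage sketch from user space,
 * assuming debugfs is mounted at the conventional /sys/kernel/debug path:
 *
 *   echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *   cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 */
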
static int features_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        u8 p;

        hci_dev_lock(hdev);
        for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
                           hdev->features[p][0], hdev->features[p][1],
                           hdev->features[p][2], hdev->features[p][3],
                           hdev->features[p][4], hdev->features[p][5],
                           hdev->features[p][6], hdev->features[p][7]);
        }
        if (lmp_le_capable(hdev))
                seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
                           hdev->le_features[0], hdev->le_features[1],
                           hdev->le_features[2], hdev->le_features[3],
                           hdev->le_features[4], hdev->le_features[5],
                           hdev->le_features[6], hdev->le_features[7]);
        hci_dev_unlock(hdev);

        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
        .open           = features_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open           = blacklist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u8 i, val[16];

                /* The Bluetooth UUID values are stored in big endian,
                 * but with reversed byte order. So convert them into
                 * the right order for the %pUb modifier.
                 */
                for (i = 0; i < 16; i++)
                        val[i] = uuid->uuid[15 - i];

                seq_printf(f, "%pUb\n", val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open           = uuids_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

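/* Byte-order example for the conversion in uuids_show() above: a UUID
 * whose canonical text form is 00001101-0000-1000-8000-00805f9b34fb
 * (the Serial Port service class, used here purely as an illustration)
 * is stored in uuid->uuid[] with the last canonical byte (0xfb) at
 * index 0, so copying uuid->uuid[15 - i] into val[i] restores the order
 * that the %pUb printk modifier expects.
 */
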
static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open           = inquiry_cache_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key = list_entry(p, struct link_key, list);
                seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
                           HCI_LINK_KEY_SIZE, key->val, key->pin_len);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
        .open           = link_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
                   hdev->dev_class[1], hdev->dev_class[0]);
        hci_dev_unlock(hdev);

        return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
        return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
        .open           = dev_class_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");

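/* DEFINE_SIMPLE_ATTRIBUTE() generates the file_operations boilerplate
 * for a single u64 value: the named get/set callbacks above are wrapped
 * in open/read/write handlers and the value is rendered with the given
 * printf-style format ("%llu\n" here). The same pattern is reused below
 * for the SSP debug mode, sniff interval and LE connection interval
 * attributes.
 */
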
static int ssp_debug_mode_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;
        struct sk_buff *skb;
        __u8 mode;
        int err;

        if (val != 0 && val != 1)
                return -EINVAL;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        hci_req_lock(hdev);
        mode = val;
        skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
                             &mode, HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        hci_dev_lock(hdev);
        hdev->ssp_debug_mode = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->ssp_debug_mode;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
                        ssp_debug_mode_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
                                     size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
                                      const char __user *user_buf,
                                      size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_SC, &hdev->dev_flags);

        return count;
}

static const struct file_operations force_sc_support_fops = {
        .open           = simple_open,
        .read           = force_sc_support_read,
        .write          = force_sc_support_write,
        .llseek         = default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
        .open           = simple_open,
        .read           = sc_only_mode_read,
        .llseek         = default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");

static int random_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->random_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
        .open           = random_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open           = static_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static ssize_t force_static_address_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
                                          const char __user *user_buf,
                                          size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags);

        return count;
}

static const struct file_operations force_static_address_fops = {
        .open           = simple_open,
        .read           = force_static_address_read,
        .write          = force_static_address_write,
        .llseek         = default_llseek,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
                struct smp_irk *irk = list_entry(p, struct smp_irk, list);
                seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
                           &irk->bdaddr, irk->addr_type,
                           16, irk->val, &irk->rpa);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_resolving_keys_show,
                           inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
        .open           = identity_resolving_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->long_term_keys) {
                struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
                seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %*phN %*phN\n",
                           &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
                           ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
                           8, ltk->rand, 16, ltk->val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
        .open           = long_term_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
                        conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
                        conn_max_interval_set, "%llu\n");

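/* The bounds checked in conn_min_interval_set()/conn_max_interval_set()
 * are in controller units of 1.25 ms, so the permitted range
 * 0x0006-0x0c80 corresponds to connection intervals of 7.5 ms to 4 s.
 */
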
static ssize_t lowpan_read(struct file *file, char __user *user_buf,
                           size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
                            size_t count, loff_t *position)
{
        struct hci_dev *hdev = fp->private_data;
        bool enable;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));

        if (copy_from_user(buf, user_buffer, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';

        if (strtobool(buf, &enable) < 0)
                return -EINVAL;

        if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
                return -EALREADY;

        change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

        return count;
}

static const struct file_operations lowpan_debugfs_fops = {
        .open           = simple_open,
        .read           = lowpan_read,
        .write          = lowpan_write,
        .llseek         = default_llseek,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

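/* The synchronous request machinery below relies on a small state
 * machine in struct hci_dev: a waiter sets req_status to HCI_REQ_PEND
 * and sleeps on req_wait_q; hci_req_sync_complete() or hci_req_cancel()
 * moves the state to HCI_REQ_DONE or HCI_REQ_CANCELED and wakes the
 * waiter, which then translates req_result into an errno.
 */
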
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

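/* Usage sketch for __hci_cmd_sync(), following dut_mode_write() and
 * ssp_debug_mode_set() above: callers take hci_req_lock(hdev), issue
 *
 *   skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *
 * and, after checking IS_ERR(skb), derive an errno from the status byte
 * with -bt_to_errno(skb->data[0]) before releasing the skb via
 * kfree_skb().
 */
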
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = __constant_cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

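/* The return value of hci_get_inquiry_mode() feeds the Write Inquiry
 * Mode command below: 0x00 selects standard inquiry results, 0x01
 * inquiry results with RSSI, and 0x02 inquiry results with RSSI or
 * extended inquiry results. The manufacturer/revision checks above
 * appear to special-case controllers that handle RSSI results without
 * advertising the corresponding feature bits.
 */
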
static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01; /* IO Capability Request */
                events[6] |= 0x02; /* IO Capability Response */
                events[6] |= 0x04; /* User Confirmation Request */
                events[6] |= 0x08; /* User Passkey Request */
                events[6] |= 0x10; /* Remote OOB Data Request */
                events[6] |= 0x20; /* Simple Pairing Complete */
                events[7] |= 0x04; /* User Passkey Notification */
                events[7] |= 0x08; /* Keypress Notification */
                events[7] |= 0x10; /* Remote Host Supported
                                    * Features Notification
                                    */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20; /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, then the host features page
                 * should also be available as well. However some
                 * controllers list the max_page as 0 as long as SSP
                 * has not been enabled. To achieve proper debugging
                 * output, force the minimum max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40; /* Triggered Clock Capture */
                events[1] |= 0x80; /* Synchronization Train Complete */
                events[2] |= 0x10; /* Slave Page Response Timeout */
                events[2] |= 0x20; /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01; /* Synchronization Train Received */
                events[2] |= 0x02; /* CSB Receive */
                events[2] |= 0x04; /* CSB Timeout */
                events[2] |= 0x08; /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev))
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev)) {
                /* If the controller has a public BD_ADDR, then by default
                 * use that one. If this is a LE only controller without
                 * a public address, default to the random address.
                 *
                 * For debugging purposes it is possible to force
                 * controllers with a public address to use the
                 * random address instead.
                 */
                if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
                    !bacmp(&hdev->bdaddr, BDADDR_ANY))
                        hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
                else
                        hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if ((lmp_sc_capable(hdev) ||
             test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
            test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
                u8 support = 0x01;
                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }
}

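/* Controller bring-up runs the four request stages above in order:
 * stage 1 resets the controller and reads basic information, stage 2
 * performs BR/EDR and LE setup including the event mask, stage 3 covers
 * stored link keys, link policy and the LE own-address type, and stage 4
 * handles event mask page 2, the synchronization train parameters and
 * Secure Connections.
 */
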
static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* The Device Under Test (DUT) mode is special and available for
         * all controller types. So just create it early on.
         */
        if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
                debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                                    &dut_mode_fops);
        }

        /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
         * BR/EDR/LE type controllers. AMP controllers only need the
         * first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* Only create debugfs entries during the initial setup
         * phase and not every time the controller gets powered on.
         */
        if (!test_bit(HCI_SETUP, &hdev->dev_flags))
                return 0;

        debugfs_create_file("features", 0444, hdev->debugfs, hdev,
                            &features_fops);
        debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
                           &hdev->manufacturer);
        debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
        debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
        debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
                            &blacklist_fops);
        debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

        if (lmp_bredr_capable(hdev)) {
                debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
                                    hdev, &inquiry_cache_fops);
                debugfs_create_file("link_keys", 0400, hdev->debugfs,
                                    hdev, &link_keys_fops);
                debugfs_create_file("dev_class", 0444, hdev->debugfs,
                                    hdev, &dev_class_fops);
                debugfs_create_file("voice_setting", 0444, hdev->debugfs,
                                    hdev, &voice_setting_fops);
        }

        if (lmp_ssp_capable(hdev)) {
                debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
                                    hdev, &auto_accept_delay_fops);
                debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
                                    hdev, &ssp_debug_mode_fops);
                debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
                                    hdev, &force_sc_support_fops);
                debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
                                    hdev, &sc_only_mode_fops);
        }

        if (lmp_sniff_capable(hdev)) {
                debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
                                    hdev, &idle_timeout_fops);
                debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
                                    hdev, &sniff_min_interval_fops);
                debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
                                    hdev, &sniff_max_interval_fops);
        }

        if (lmp_le_capable(hdev)) {
                debugfs_create_file("random_address", 0444, hdev->debugfs,
                                    hdev, &random_address_fops);
                debugfs_create_file("static_address", 0444, hdev->debugfs,
                                    hdev, &static_address_fops);

                /* For controllers with a public address, provide a debug
                 * option to force the usage of the configured static
                 * address. By default the public address is used.
                 */
                if (bacmp(&hdev->bdaddr, BDADDR_ANY))
                        debugfs_create_file("force_static_address", 0644,
                                            hdev->debugfs, hdev,
                                            &force_static_address_fops);

                debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
                                  &hdev->le_white_list_size);
                debugfs_create_file("identity_resolving_keys", 0400,
                                    hdev->debugfs, hdev,
                                    &identity_resolving_keys_fops);
                debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
                                    hdev, &long_term_keys_fops);
                debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
                                    hdev, &conn_min_interval_fops);
                debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
                                    hdev, &conn_max_interval_fops);
                debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
                                    &lowpan_debugfs_fops);
        }

        return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}

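/* A typical discovery sequence walks DISCOVERY_STOPPED -> STARTING ->
 * FINDING -> (RESOLVING, while remote names are fetched) -> STOPPING ->
 * STOPPED. mgmt_discovering() is only emitted on the user-visible edges:
 * entering FINDING and settling back into STOPPED.
 */
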
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr);

        if (ssp)
                *ssp = data->ssp_mode;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (ie->data.ssp_mode && ssp)
                        *ssp = true;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}

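/* The discovery cache keeps every entry on cache->all; entries whose
 * remote name is still missing are additionally linked on cache->unknown,
 * and entries queued for name resolution live on cache->resolve, kept in
 * RSSI order by hci_inquiry_cache_update_resolve() above. The bool
 * returned by hci_inquiry_cache_update() tells the caller whether the
 * entry's name is already known, i.e. whether a remote name request can
 * be skipped.
 */
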
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

42c6b129 1889static void hci_inq_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1890{
1891 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
42c6b129 1892 struct hci_dev *hdev = req->hdev;
1da177e4
LT
1893 struct hci_cp_inquiry cp;
1894
1895 BT_DBG("%s", hdev->name);
1896
1897 if (test_bit(HCI_INQUIRY, &hdev->flags))
1898 return;
1899
1900 /* Start Inquiry */
1901 memcpy(&cp.lap, &ir->lap, 3);
1902 cp.length = ir->length;
1903 cp.num_rsp = ir->num_rsp;
42c6b129 1904 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
1905}
1906
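/* Action function for the wait_on_bit() call in hci_inquiry() below:
 * sleep until woken up and report whether a signal interrupted the
 * wait.
 */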
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

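/* This handler is reached through the HCIINQUIRY ioctl on an HCI
 * socket. An illustrative (not verbatim) user space invocation:
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[8];
 *	} buf = { .ir = { .dev_id = 0, .length = 8, .num_rsp = 8 } };
 *
 *	ioctl(hci_sock_fd, HCIINQUIRY, &buf);
 */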
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* inquiry_cache_dump() can't sleep. Therefore we allocate a
	 * temporary buffer and then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

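/* Bring the device up under the request lock: call the driver's
 * open(), run the optional driver setup() stage while in HCI_SETUP,
 * then run the HCI init sequence. On any failure everything is torn
 * down again before returning.
 */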
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}

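/* Take the device down: flush the works, cancel delayed work, flush
 * the queues, optionally issue an HCI reset and finally call the
 * driver's close(). Counterpart of hci_dev_do_open().
 */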
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

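/* Worker queued on hdev->req_workqueue (e.g. from hci_register_dev()
 * below) to power on the device asynchronously. While the device is
 * not yet managed, a delayed auto-off is armed so that an unused
 * controller gets powered back down.
 */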
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		list_del(&k->list);
		kfree(k);
	}
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

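/* An LTK's type encodes which role it can be used in: HCI_SMP_STK and
 * HCI_SMP_LTK are usable when acting as master, while the slave
 * variants are not. The lookups below filter on this distinction.
 */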
static bool ltk_type_master(u8 type)
{
	if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
		return true;

	return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8],
			     bool master)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		if (ltk_type_master(k->type) != master)
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type, bool master)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0 &&
		    ltk_type_master(k->type) == master)
			return k;

	return NULL;
}

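/* Resolve a Resolvable Private Address to a stored IRK: first try the
 * cached RPA of each key, then fall back to the AES based
 * smp_irk_matches() check and cache the RPA on a hit.
 */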
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa))
			return irk;
	}

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			return irk;
		}
	}

	return NULL;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0)
			return irk;
	}

	return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;
	bool master = ltk_type_master(type);

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k, *tmp;
	int removed = 0;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k, *tmp;

	/* Iterate the IRK list, not the LTK list: the entries here are
	 * struct smp_irk and only the identity_resolving_keys list
	 * holds them.
	 */
	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 *hash, u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));

	memset(data->hash256, 0, sizeof(data->hash256));
	memset(data->randomizer256, 0, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
				u8 *hash192, u8 *randomizer192,
				u8 *hash256, u8 *randomizer256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash192, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));

	memcpy(data->hash256, hash256, sizeof(data->hash256));
	memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
					 bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

void hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_blacklist_clear(hdev);
		return 0;
	}

	entry = hci_blacklist_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
			return params;
		}
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
			 u16 conn_min_interval, u16 conn_max_interval)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params) {
		params->conn_min_interval = conn_min_interval;
		params->conn_max_interval = conn_max_interval;
		return;
	}

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;
	params->conn_min_interval = conn_min_interval;
	params->conn_max_interval = conn_max_interval;

	list_add(&params->list, &hdev->le_conn_params);

	BT_DBG("addr %pMR (type %u) conn_min_interval 0x%.4x "
	       "conn_max_interval 0x%.4x", addr, addr_type, conn_min_interval,
	       conn_max_interval);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	list_del(&params->list);
	kfree(params);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE connection parameters were removed");
}

static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}

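/* A typical driver pairs hci_alloc_dev() with hci_register_dev()
 * roughly like this (illustrative sketch only; the xxx_* callbacks
 * are placeholders):
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = xxx_open;
 *	hdev->close = xxx_close;
 *	hdev->send  = xxx_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */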
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
					       CRYPTO_ALG_ASYNC);
	if (IS_ERR(hdev->tfm_aes)) {
		BT_ERR("Unable to create crypto context");
		error = PTR_ERR(hdev->tfm_aes);
		hdev->tfm_aes = NULL;
		goto err_wqueue;
	}

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_tfm;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_tfm:
	crypto_free_blkcipher(hdev->tfm_aes);
err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	if (hdev->tfm_aes)
		crypto_free_blkcipher(hdev->tfm_aes);

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_conn_params_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

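/* Reassemble a packet from byte chunks: hdev->reassembly[index] holds
 * the partially received skb and scb->expect counts the bytes still
 * missing. Once the packet header is complete, expect is
 * re-initialized from the header's length field; when it reaches zero
 * the finished frame is handed to hci_recv_frame().
 */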
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

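/* Same as above, but for stream transports (e.g. H4 over UART) where
 * each frame is prefixed with a single packet type byte instead of
 * being delivered with out-of-band type information.
 */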
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}

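/* Asynchronous HCI request API. Typical usage, as seen in
 * le_scan_disable_work() above:
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, opcode, plen, &cp);
 *	err = hci_req_run(&req, complete_callback);
 *
 * All queued commands are spliced onto hdev->cmd_q in one go and the
 * completion callback is attached to the last command.
 */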
3119ae95
JH
3741void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3742{
3743 skb_queue_head_init(&req->cmd_q);
3744 req->hdev = hdev;
5d73e034 3745 req->err = 0;
3119ae95
JH
3746}
3747
3748int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
3749{
3750 struct hci_dev *hdev = req->hdev;
3751 struct sk_buff *skb;
3752 unsigned long flags;
3753
3754 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
3755
5d73e034
AG
3756 /* If an error occured during request building, remove all HCI
3757 * commands queued on the HCI request queue.
3758 */
3759 if (req->err) {
3760 skb_queue_purge(&req->cmd_q);
3761 return req->err;
3762 }
3763
3119ae95
JH
3764 /* Do not allow empty requests */
3765 if (skb_queue_empty(&req->cmd_q))
382b0c39 3766 return -ENODATA;
3119ae95
JH
3767
3768 skb = skb_peek_tail(&req->cmd_q);
3769 bt_cb(skb)->req.complete = complete;
3770
3771 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3772 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
3773 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3774
3775 queue_work(hdev->workqueue, &hdev->cmd_work);
3776
3777 return 0;
3778}
3779
1ca3a9d0 3780static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
07dc93dd 3781 u32 plen, const void *param)
1da177e4
LT
3782{
3783 int len = HCI_COMMAND_HDR_SIZE + plen;
3784 struct hci_command_hdr *hdr;
3785 struct sk_buff *skb;
3786
1da177e4 3787 skb = bt_skb_alloc(len, GFP_ATOMIC);
1ca3a9d0
JH
3788 if (!skb)
3789 return NULL;
1da177e4
LT
3790
3791 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 3792 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
3793 hdr->plen = plen;
3794
3795 if (plen)
3796 memcpy(skb_put(skb, plen), param, plen);
3797
3798 BT_DBG("skb len %d", skb->len);
3799
0d48d939 3800 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
c78ae283 3801
1ca3a9d0
JH
3802 return skb;
3803}
3804
3805/* Send HCI command */
07dc93dd
JH
3806int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3807 const void *param)
1ca3a9d0
JH
3808{
3809 struct sk_buff *skb;
3810
3811 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3812
3813 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3814 if (!skb) {
3815 BT_ERR("%s no memory for command", hdev->name);
3816 return -ENOMEM;
3817 }
3818
11714b3d
JH
3819 /* Stand-alone HCI commands must be flaged as
3820 * single-command requests.
3821 */
3822 bt_cb(skb)->req.start = true;
3823
1da177e4 3824 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 3825 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
3826
3827 return 0;
3828}
1da177e4 3829
71c76a17 3830/* Queue a command to an asynchronous HCI request */
07dc93dd
JH
3831void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
3832 const void *param, u8 event)
71c76a17
JH
3833{
3834 struct hci_dev *hdev = req->hdev;
3835 struct sk_buff *skb;
3836
3837 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3838
34739c1e
AG
3839 /* If an error occurred during request building, there is no point in
3840 * queueing the HCI command. We can simply return.
3841 */
3842 if (req->err)
3843 return;
3844
71c76a17
JH
3845 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3846 if (!skb) {
5d73e034
AG
3847 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
3848 hdev->name, opcode);
3849 req->err = -ENOMEM;
e348fe6b 3850 return;
71c76a17
JH
3851 }
3852
3853 if (skb_queue_empty(&req->cmd_q))
3854 bt_cb(skb)->req.start = true;
3855
02350a72
JH
3856 bt_cb(skb)->req.event = event;
3857
71c76a17 3858 skb_queue_tail(&req->cmd_q, skb);
71c76a17
JH
3859}
3860
07dc93dd
JH
3861void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
3862 const void *param)
02350a72
JH
3863{
3864 hci_req_add_ev(req, opcode, plen, param, 0);
3865}
3866
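Tying the request API together, a minimal sketch (the callback and wrapper names are hypothetical) that builds and runs a one-command request:

static void example_req_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s request finished, status 0x%2.2x", hdev->name, status);
}

static int example_run_request(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_RESET, 0, NULL);

	/* Splices the queued commands onto hdev->cmd_q as one unit;
	 * returns -ENODATA for an empty request, or req->err if the
	 * request builder recorded a failure.
	 */
	return hci_req_run(&req, example_req_complete);
}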
1da177e4 3867/* Get data from the previously sent command */
a9de9248 3868void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
3869{
3870 struct hci_command_hdr *hdr;
3871
3872 if (!hdev->sent_cmd)
3873 return NULL;
3874
3875 hdr = (void *) hdev->sent_cmd->data;
3876
a9de9248 3877 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
3878 return NULL;
3879
f0e09510 3880 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
3881
3882 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3883}
3884
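For illustration, a sketch of how a command-complete handler recovers the parameters it sent (assuming HCI_OP_WRITE_SCAN_ENABLE and its single-byte parameter as defined by the HCI specification; the function name is hypothetical):

static void example_check_scan_param(struct hci_dev *hdev)
{
	__u8 *sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);

	/* NULL when the last sent command carried a different opcode. */
	if (!sent)
		return;

	BT_DBG("%s scan enable was set to 0x%2.2x", hdev->name, *sent);
}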
3885/* Send ACL data */
3886static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3887{
3888 struct hci_acl_hdr *hdr;
3889 int len = skb->len;
3890
badff6d0
ACM
3891 skb_push(skb, HCI_ACL_HDR_SIZE);
3892 skb_reset_transport_header(skb);
9c70220b 3893 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
3894 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3895 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
3896}
3897
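The handle field packs the 12-bit connection handle together with the packet-boundary and broadcast flags in the upper bits; with illustrative values, and assuming ACL_START == 0x02, hci_handle_pack(0x002a, ACL_START) yields 0x202a.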
ee22be7e 3898static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 3899 struct sk_buff *skb, __u16 flags)
1da177e4 3900{
ee22be7e 3901 struct hci_conn *conn = chan->conn;
1da177e4
LT
3902 struct hci_dev *hdev = conn->hdev;
3903 struct sk_buff *list;
3904
087bfd99
GP
3905 skb->len = skb_headlen(skb);
3906 skb->data_len = 0;
3907
3908 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
3909
3910 switch (hdev->dev_type) {
3911 case HCI_BREDR:
3912 hci_add_acl_hdr(skb, conn->handle, flags);
3913 break;
3914 case HCI_AMP:
3915 hci_add_acl_hdr(skb, chan->handle, flags);
3916 break;
3917 default:
3918 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3919 return;
3920 }
087bfd99 3921
70f23020
AE
3922 list = skb_shinfo(skb)->frag_list;
3923 if (!list) {
1da177e4
LT
3924 /* Non-fragmented */
3925 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3926
73d80deb 3927 skb_queue_tail(queue, skb);
1da177e4
LT
3928 } else {
3929 /* Fragmented */
3930 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3931
3932 skb_shinfo(skb)->frag_list = NULL;
3933
3934 /* Queue all fragments atomically */
af3e6359 3935 spin_lock(&queue->lock);
1da177e4 3936
73d80deb 3937 __skb_queue_tail(queue, skb);
e702112f
AE
3938
3939 flags &= ~ACL_START;
3940 flags |= ACL_CONT;
1da177e4
LT
3941 do {
3942 skb = list; list = list->next;
8e87d142 3943
0d48d939 3944 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 3945 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
3946
3947 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3948
73d80deb 3949 __skb_queue_tail(queue, skb);
1da177e4
LT
3950 } while (list);
3951
af3e6359 3952 spin_unlock(&queue->lock);
1da177e4 3953 }
73d80deb
LAD
3954}
3955
3956void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3957{
ee22be7e 3958 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 3959
f0e09510 3960 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb 3961
ee22be7e 3962 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 3963
3eff45ea 3964 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 3965}
1da177e4
LT
3966
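A caller-side sketch (the wrapper name is hypothetical): an upper layer hands a fully built PDU to the per-channel queue and lets the TX work item do the scheduling:

static void example_queue_pdu(struct hci_chan *chan, struct sk_buff *skb)
{
	/* Higher-priority skbs are served first by hci_chan_sent(). */
	skb->priority = HCI_PRIO_MAX - 1;
	hci_send_acl(chan, skb, ACL_START);
}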
3967/* Send SCO data */
0d861d8b 3968void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
3969{
3970 struct hci_dev *hdev = conn->hdev;
3971 struct hci_sco_hdr hdr;
3972
3973 BT_DBG("%s len %d", hdev->name, skb->len);
3974
aca3192c 3975 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
3976 hdr.dlen = skb->len;
3977
badff6d0
ACM
3978 skb_push(skb, HCI_SCO_HDR_SIZE);
3979 skb_reset_transport_header(skb);
9c70220b 3980 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 3981
0d48d939 3982 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 3983
1da177e4 3984 skb_queue_tail(&conn->data_q, skb);
3eff45ea 3985 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 3986}
1da177e4
LT
3987
3988/* ---- HCI TX task (outgoing data) ---- */
3989
3990/* HCI Connection scheduler */
6039aa73
GP
3991static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3992 int *quote)
1da177e4
LT
3993{
3994 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 3995 struct hci_conn *conn = NULL, *c;
abc5de8f 3996 unsigned int num = 0, min = ~0;
1da177e4 3997
8e87d142 3998 /* We don't have to lock device here. Connections are always
1da177e4 3999 * added and removed with TX task disabled. */
bf4c6325
GP
4000
4001 rcu_read_lock();
4002
4003 list_for_each_entry_rcu(c, &h->list, list) {
769be974 4004 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 4005 continue;
769be974
MH
4006
4007 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4008 continue;
4009
1da177e4
LT
4010 num++;
4011
4012 if (c->sent < min) {
4013 min = c->sent;
4014 conn = c;
4015 }
52087a79
LAD
4016
4017 if (hci_conn_num(hdev, type) == num)
4018 break;
1da177e4
LT
4019 }
4020
bf4c6325
GP
4021 rcu_read_unlock();
4022
1da177e4 4023 if (conn) {
6ed58ec5
VT
4024 int cnt, q;
4025
4026 switch (conn->type) {
4027 case ACL_LINK:
4028 cnt = hdev->acl_cnt;
4029 break;
4030 case SCO_LINK:
4031 case ESCO_LINK:
4032 cnt = hdev->sco_cnt;
4033 break;
4034 case LE_LINK:
4035 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4036 break;
4037 default:
4038 cnt = 0;
4039 BT_ERR("Unknown link type");
4040 }
4041
4042 q = cnt / num;
1da177e4
LT
4043 *quote = q ? q : 1;
4044 } else
4045 *quote = 0;
4046
4047 BT_DBG("conn %p quote %d", conn, *quote);
4048 return conn;
4049}
4050
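For example, with illustrative numbers: if hdev->acl_cnt == 8 and three ACL connections hold queued data, the connection with the smallest sent count is picked and given a quota of 8 / 3 == 2 frames; a zero quotient is rounded up so the winner can always send at least one frame.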
6039aa73 4051static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
4052{
4053 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4054 struct hci_conn *c;
1da177e4 4055
bae1f5d9 4056 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 4057
bf4c6325
GP
4058 rcu_read_lock();
4059
1da177e4 4060 /* Kill stalled connections */
bf4c6325 4061 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 4062 if (c->type == type && c->sent) {
6ed93dc6
AE
4063 BT_ERR("%s killing stalled connection %pMR",
4064 hdev->name, &c->dst);
bed71748 4065 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
4066 }
4067 }
bf4c6325
GP
4068
4069 rcu_read_unlock();
1da177e4
LT
4070}
4071
6039aa73
GP
4072static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4073 int *quote)
1da177e4 4074{
73d80deb
LAD
4075 struct hci_conn_hash *h = &hdev->conn_hash;
4076 struct hci_chan *chan = NULL;
abc5de8f 4077 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 4078 struct hci_conn *conn;
73d80deb
LAD
4079 int cnt, q, conn_num = 0;
4080
4081 BT_DBG("%s", hdev->name);
4082
bf4c6325
GP
4083 rcu_read_lock();
4084
4085 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
4086 struct hci_chan *tmp;
4087
4088 if (conn->type != type)
4089 continue;
4090
4091 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4092 continue;
4093
4094 conn_num++;
4095
8192edef 4096 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
4097 struct sk_buff *skb;
4098
4099 if (skb_queue_empty(&tmp->data_q))
4100 continue;
4101
4102 skb = skb_peek(&tmp->data_q);
4103 if (skb->priority < cur_prio)
4104 continue;
4105
4106 if (skb->priority > cur_prio) {
4107 num = 0;
4108 min = ~0;
4109 cur_prio = skb->priority;
4110 }
4111
4112 num++;
4113
4114 if (conn->sent < min) {
4115 min = conn->sent;
4116 chan = tmp;
4117 }
4118 }
4119
4120 if (hci_conn_num(hdev, type) == conn_num)
4121 break;
4122 }
4123
bf4c6325
GP
4124 rcu_read_unlock();
4125
73d80deb
LAD
4126 if (!chan)
4127 return NULL;
4128
4129 switch (chan->conn->type) {
4130 case ACL_LINK:
4131 cnt = hdev->acl_cnt;
4132 break;
bd1eb66b
AE
4133 case AMP_LINK:
4134 cnt = hdev->block_cnt;
4135 break;
73d80deb
LAD
4136 case SCO_LINK:
4137 case ESCO_LINK:
4138 cnt = hdev->sco_cnt;
4139 break;
4140 case LE_LINK:
4141 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4142 break;
4143 default:
4144 cnt = 0;
4145 BT_ERR("Unknown link type");
4146 }
4147
4148 q = cnt / num;
4149 *quote = q ? q : 1;
4150 BT_DBG("chan %p quote %d", chan, *quote);
4151 return chan;
4152}
4153
02b20f0b
LAD
4154static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4155{
4156 struct hci_conn_hash *h = &hdev->conn_hash;
4157 struct hci_conn *conn;
4158 int num = 0;
4159
4160 BT_DBG("%s", hdev->name);
4161
bf4c6325
GP
4162 rcu_read_lock();
4163
4164 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
4165 struct hci_chan *chan;
4166
4167 if (conn->type != type)
4168 continue;
4169
4170 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4171 continue;
4172
4173 num++;
4174
8192edef 4175 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
4176 struct sk_buff *skb;
4177
4178 if (chan->sent) {
4179 chan->sent = 0;
4180 continue;
4181 }
4182
4183 if (skb_queue_empty(&chan->data_q))
4184 continue;
4185
4186 skb = skb_peek(&chan->data_q);
4187 if (skb->priority >= HCI_PRIO_MAX - 1)
4188 continue;
4189
4190 skb->priority = HCI_PRIO_MAX - 1;
4191
4192 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 4193 skb->priority);
02b20f0b
LAD
4194 }
4195
4196 if (hci_conn_num(hdev, type) == num)
4197 break;
4198 }
bf4c6325
GP
4199
4200 rcu_read_unlock();
4201
02b20f0b
LAD
4202}
4203
b71d385a
AE
4204static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4205{
4206 /* Calculate count of blocks used by this packet */
4207 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4208}
4209
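With illustrative values: a 1021-byte ACL frame on a controller reporting block_len == 339 costs DIV_ROUND_UP(1021 - 4, 339) == 3 data blocks, HCI_ACL_HDR_SIZE being the 4 header bytes that do not count against the payload.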
6039aa73 4210static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 4211{
1da177e4
LT
4212 if (!test_bit(HCI_RAW, &hdev->flags)) {
4213 /* ACL tx timeout must be longer than maximum
4214 * link supervision timeout (40.9 seconds) */
63d2bc1b 4215 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 4216 HCI_ACL_TX_TIMEOUT))
bae1f5d9 4217 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 4218 }
63d2bc1b 4219}
1da177e4 4220
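HCI_ACL_TX_TIMEOUT is 45 seconds in this tree, comfortably above the 40.9 second ceiling mentioned in the comment and matching the HZ * 45 constant used for the LE case in hci_sched_le() below.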
6039aa73 4221static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
4222{
4223 unsigned int cnt = hdev->acl_cnt;
4224 struct hci_chan *chan;
4225 struct sk_buff *skb;
4226 int quote;
4227
4228 __check_timeout(hdev, cnt);
04837f64 4229
73d80deb 4230 while (hdev->acl_cnt &&
a8c5fb1a 4231 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
4232 u32 priority = (skb_peek(&chan->data_q))->priority;
4233 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4234 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4235 skb->len, skb->priority);
73d80deb 4236
ec1cce24
LAD
4237 /* Stop if priority has changed */
4238 if (skb->priority < priority)
4239 break;
4240
4241 skb = skb_dequeue(&chan->data_q);
4242
73d80deb 4243 hci_conn_enter_active_mode(chan->conn,
04124681 4244 bt_cb(skb)->force_active);
04837f64 4245
57d17d70 4246 hci_send_frame(hdev, skb);
1da177e4
LT
4247 hdev->acl_last_tx = jiffies;
4248
4249 hdev->acl_cnt--;
73d80deb
LAD
4250 chan->sent++;
4251 chan->conn->sent++;
1da177e4
LT
4252 }
4253 }
02b20f0b
LAD
4254
4255 if (cnt != hdev->acl_cnt)
4256 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
4257}
4258
6039aa73 4259static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 4260{
63d2bc1b 4261 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
4262 struct hci_chan *chan;
4263 struct sk_buff *skb;
4264 int quote;
bd1eb66b 4265 u8 type;
b71d385a 4266
63d2bc1b 4267 __check_timeout(hdev, cnt);
b71d385a 4268
bd1eb66b
AE
4269 BT_DBG("%s", hdev->name);
4270
4271 if (hdev->dev_type == HCI_AMP)
4272 type = AMP_LINK;
4273 else
4274 type = ACL_LINK;
4275
b71d385a 4276 while (hdev->block_cnt > 0 &&
bd1eb66b 4277 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
4278 u32 priority = (skb_peek(&chan->data_q))->priority;
4279 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4280 int blocks;
4281
4282 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4283 skb->len, skb->priority);
b71d385a
AE
4284
4285 /* Stop if priority has changed */
4286 if (skb->priority < priority)
4287 break;
4288
4289 skb = skb_dequeue(&chan->data_q);
4290
4291 blocks = __get_blocks(hdev, skb);
4292 if (blocks > hdev->block_cnt)
4293 return;
4294
4295 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 4296 bt_cb(skb)->force_active);
b71d385a 4297
57d17d70 4298 hci_send_frame(hdev, skb);
b71d385a
AE
4299 hdev->acl_last_tx = jiffies;
4300
4301 hdev->block_cnt -= blocks;
4302 quote -= blocks;
4303
4304 chan->sent += blocks;
4305 chan->conn->sent += blocks;
4306 }
4307 }
4308
4309 if (cnt != hdev->block_cnt)
bd1eb66b 4310 hci_prio_recalculate(hdev, type);
b71d385a
AE
4311}
4312
6039aa73 4313static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
4314{
4315 BT_DBG("%s", hdev->name);
4316
bd1eb66b
AE
4317 /* No ACL link over BR/EDR controller */
4318 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4319 return;
4320
4321 /* No AMP link over AMP controller */
4322 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
4323 return;
4324
4325 switch (hdev->flow_ctl_mode) {
4326 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4327 hci_sched_acl_pkt(hdev);
4328 break;
4329
4330 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4331 hci_sched_acl_blk(hdev);
4332 break;
4333 }
4334}
4335
1da177e4 4336/* Schedule SCO */
6039aa73 4337static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
4338{
4339 struct hci_conn *conn;
4340 struct sk_buff *skb;
4341 int quote;
4342
4343 BT_DBG("%s", hdev->name);
4344
52087a79
LAD
4345 if (!hci_conn_num(hdev, SCO_LINK))
4346 return;
4347
1da177e4
LT
4348 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4349 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4350 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4351 hci_send_frame(hdev, skb);
1da177e4
LT
4352
4353 conn->sent++;
4354 if (conn->sent == ~0)
4355 conn->sent = 0;
4356 }
4357 }
4358}
4359
6039aa73 4360static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
4361{
4362 struct hci_conn *conn;
4363 struct sk_buff *skb;
4364 int quote;
4365
4366 BT_DBG("%s", hdev->name);
4367
52087a79
LAD
4368 if (!hci_conn_num(hdev, ESCO_LINK))
4369 return;
4370
8fc9ced3
GP
4371 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4372 &quote))) {
b6a0dc82
MH
4373 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4374 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4375 hci_send_frame(hdev, skb);
b6a0dc82
MH
4376
4377 conn->sent++;
4378 if (conn->sent == ~0)
4379 conn->sent = 0;
4380 }
4381 }
4382}
4383
6039aa73 4384static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 4385{
73d80deb 4386 struct hci_chan *chan;
6ed58ec5 4387 struct sk_buff *skb;
02b20f0b 4388 int quote, cnt, tmp;
6ed58ec5
VT
4389
4390 BT_DBG("%s", hdev->name);
4391
52087a79
LAD
4392 if (!hci_conn_num(hdev, LE_LINK))
4393 return;
4394
6ed58ec5
VT
4395 if (!test_bit(HCI_RAW, &hdev->flags)) {
4396 /* LE tx timeout must be longer than maximum
4397 * link supervision timeout (40.9 seconds) */
bae1f5d9 4398 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 4399 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 4400 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
4401 }
4402
4403 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 4404 tmp = cnt;
73d80deb 4405 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
4406 u32 priority = (skb_peek(&chan->data_q))->priority;
4407 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4408 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4409 skb->len, skb->priority);
6ed58ec5 4410
ec1cce24
LAD
4411 /* Stop if priority has changed */
4412 if (skb->priority < priority)
4413 break;
4414
4415 skb = skb_dequeue(&chan->data_q);
4416
57d17d70 4417 hci_send_frame(hdev, skb);
6ed58ec5
VT
4418 hdev->le_last_tx = jiffies;
4419
4420 cnt--;
73d80deb
LAD
4421 chan->sent++;
4422 chan->conn->sent++;
6ed58ec5
VT
4423 }
4424 }
73d80deb 4425
6ed58ec5
VT
4426 if (hdev->le_pkts)
4427 hdev->le_cnt = cnt;
4428 else
4429 hdev->acl_cnt = cnt;
02b20f0b
LAD
4430
4431 if (cnt != tmp)
4432 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
4433}
4434
3eff45ea 4435static void hci_tx_work(struct work_struct *work)
1da177e4 4436{
3eff45ea 4437 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
4438 struct sk_buff *skb;
4439
6ed58ec5 4440 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 4441 hdev->sco_cnt, hdev->le_cnt);
1da177e4 4442
52de599e
MH
4443 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4444 /* Schedule queues and send stuff to HCI driver */
4445 hci_sched_acl(hdev);
4446 hci_sched_sco(hdev);
4447 hci_sched_esco(hdev);
4448 hci_sched_le(hdev);
4449 }
6ed58ec5 4450
1da177e4
LT
4451 /* Send next queued raw (unknown type) packet */
4452 while ((skb = skb_dequeue(&hdev->raw_q)))
57d17d70 4453 hci_send_frame(hdev, skb);
1da177e4
LT
4454}
4455
25985edc 4456/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
4457
4458/* ACL data packet */
6039aa73 4459static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
4460{
4461 struct hci_acl_hdr *hdr = (void *) skb->data;
4462 struct hci_conn *conn;
4463 __u16 handle, flags;
4464
4465 skb_pull(skb, HCI_ACL_HDR_SIZE);
4466
4467 handle = __le16_to_cpu(hdr->handle);
4468 flags = hci_flags(handle);
4469 handle = hci_handle(handle);
4470
f0e09510 4471 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 4472 handle, flags);
1da177e4
LT
4473
4474 hdev->stat.acl_rx++;
4475
4476 hci_dev_lock(hdev);
4477 conn = hci_conn_hash_lookup_handle(hdev, handle);
4478 hci_dev_unlock(hdev);
8e87d142 4479
1da177e4 4480 if (conn) {
65983fc7 4481 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 4482
1da177e4 4483 /* Send to upper protocol */
686ebf28
UF
4484 l2cap_recv_acldata(conn, skb, flags);
4485 return;
1da177e4 4486 } else {
8e87d142 4487 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 4488 hdev->name, handle);
1da177e4
LT
4489 }
4490
4491 kfree_skb(skb);
4492}
4493
4494/* SCO data packet */
6039aa73 4495static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
4496{
4497 struct hci_sco_hdr *hdr = (void *) skb->data;
4498 struct hci_conn *conn;
4499 __u16 handle;
4500
4501 skb_pull(skb, HCI_SCO_HDR_SIZE);
4502
4503 handle = __le16_to_cpu(hdr->handle);
4504
f0e09510 4505 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
4506
4507 hdev->stat.sco_rx++;
4508
4509 hci_dev_lock(hdev);
4510 conn = hci_conn_hash_lookup_handle(hdev, handle);
4511 hci_dev_unlock(hdev);
4512
4513 if (conn) {
1da177e4 4514 /* Send to upper protocol */
686ebf28
UF
4515 sco_recv_scodata(conn, skb);
4516 return;
1da177e4 4517 } else {
8e87d142 4518 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 4519 hdev->name, handle);
1da177e4
LT
4520 }
4521
4522 kfree_skb(skb);
4523}
4524
9238f36a
JH
4525static bool hci_req_is_complete(struct hci_dev *hdev)
4526{
4527 struct sk_buff *skb;
4528
4529 skb = skb_peek(&hdev->cmd_q);
4530 if (!skb)
4531 return true;
4532
4533 return bt_cb(skb)->req.start;
4534}
4535
42c6b129
JH
4536static void hci_resend_last(struct hci_dev *hdev)
4537{
4538 struct hci_command_hdr *sent;
4539 struct sk_buff *skb;
4540 u16 opcode;
4541
4542 if (!hdev->sent_cmd)
4543 return;
4544
4545 sent = (void *) hdev->sent_cmd->data;
4546 opcode = __le16_to_cpu(sent->opcode);
4547 if (opcode == HCI_OP_RESET)
4548 return;
4549
4550 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4551 if (!skb)
4552 return;
4553
4554 skb_queue_head(&hdev->cmd_q, skb);
4555 queue_work(hdev->workqueue, &hdev->cmd_work);
4556}
4557
9238f36a
JH
4558void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4559{
4560 hci_req_complete_t req_complete = NULL;
4561 struct sk_buff *skb;
4562 unsigned long flags;
4563
4564 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4565
42c6b129
JH
4566 /* If the completed command doesn't match the last one that was
4567 * sent, we need to do special handling of it.
9238f36a 4568 */
42c6b129
JH
4569 if (!hci_sent_cmd_data(hdev, opcode)) {
4570 /* Some CSR based controllers generate a spontaneous
4571 * reset complete event during init and any pending
4572 * command will never be completed. In such a case we
4573 * need to resend whatever was the last sent
4574 * command.
4575 */
4576 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4577 hci_resend_last(hdev);
4578
9238f36a 4579 return;
42c6b129 4580 }
9238f36a
JH
4581
4582 /* If the command succeeded and there are still more commands in
4583 * this request, the request is not yet complete.
4584 */
4585 if (!status && !hci_req_is_complete(hdev))
4586 return;
4587
4588 /* If this was the last command in a request, the complete
4589 * callback would be found in hdev->sent_cmd instead of the
4590 * command queue (hdev->cmd_q).
4591 */
4592 if (hdev->sent_cmd) {
4593 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
53e21fbc
JH
4594
4595 if (req_complete) {
4596 /* We must set the complete callback to NULL to
4597 * avoid calling the callback more than once if
4598 * this function gets called again.
4599 */
4600 bt_cb(hdev->sent_cmd)->req.complete = NULL;
4601
9238f36a 4602 goto call_complete;
53e21fbc 4603 }
9238f36a
JH
4604 }
4605
4606 /* Remove all pending commands belonging to this request */
4607 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4608 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4609 if (bt_cb(skb)->req.start) {
4610 __skb_queue_head(&hdev->cmd_q, skb);
4611 break;
4612 }
4613
4614 req_complete = bt_cb(skb)->req.complete;
4615 kfree_skb(skb);
4616 }
4617 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4618
4619call_complete:
4620 if (req_complete)
4621 req_complete(hdev, status);
4622}
4623
b78752cc 4624static void hci_rx_work(struct work_struct *work)
1da177e4 4625{
b78752cc 4626 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
4627 struct sk_buff *skb;
4628
4629 BT_DBG("%s", hdev->name);
4630
1da177e4 4631 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
4632 /* Send copy to monitor */
4633 hci_send_to_monitor(hdev, skb);
4634
1da177e4
LT
4635 if (atomic_read(&hdev->promisc)) {
4636 /* Send copy to the sockets */
470fe1b5 4637 hci_send_to_sock(hdev, skb);
1da177e4
LT
4638 }
4639
0736cfa8
MH
4640 if (test_bit(HCI_RAW, &hdev->flags) ||
4641 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1da177e4
LT
4642 kfree_skb(skb);
4643 continue;
4644 }
4645
4646 if (test_bit(HCI_INIT, &hdev->flags)) {
4647 /* Don't process data packets in these states. */
0d48d939 4648 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
4649 case HCI_ACLDATA_PKT:
4650 case HCI_SCODATA_PKT:
4651 kfree_skb(skb);
4652 continue;
3ff50b79 4653 }
1da177e4
LT
4654 }
4655
4656 /* Process frame */
0d48d939 4657 switch (bt_cb(skb)->pkt_type) {
1da177e4 4658 case HCI_EVENT_PKT:
b78752cc 4659 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
4660 hci_event_packet(hdev, skb);
4661 break;
4662
4663 case HCI_ACLDATA_PKT:
4664 BT_DBG("%s ACL data packet", hdev->name);
4665 hci_acldata_packet(hdev, skb);
4666 break;
4667
4668 case HCI_SCODATA_PKT:
4669 BT_DBG("%s SCO data packet", hdev->name);
4670 hci_scodata_packet(hdev, skb);
4671 break;
4672
4673 default:
4674 kfree_skb(skb);
4675 break;
4676 }
4677 }
1da177e4
LT
4678}
4679
c347b765 4680static void hci_cmd_work(struct work_struct *work)
1da177e4 4681{
c347b765 4682 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
4683 struct sk_buff *skb;
4684
2104786b
AE
4685 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4686 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 4687
1da177e4 4688 /* Send queued commands */
5a08ecce
AE
4689 if (atomic_read(&hdev->cmd_cnt)) {
4690 skb = skb_dequeue(&hdev->cmd_q);
4691 if (!skb)
4692 return;
4693
7585b97a 4694 kfree_skb(hdev->sent_cmd);
1da177e4 4695
a675d7f1 4696 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 4697 if (hdev->sent_cmd) {
1da177e4 4698 atomic_dec(&hdev->cmd_cnt);
57d17d70 4699 hci_send_frame(hdev, skb);
7bdb8a5c
SJ
4700 if (test_bit(HCI_RESET, &hdev->flags))
4701 del_timer(&hdev->cmd_timer);
4702 else
4703 mod_timer(&hdev->cmd_timer,
5f246e89 4704 jiffies + HCI_CMD_TIMEOUT);
1da177e4
LT
4705 } else {
4706 skb_queue_head(&hdev->cmd_q, skb);
c347b765 4707 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
4708 }
4709 }
4710}