Bluetooth: Keep msec in DISCOV_INTERLEAVED_TIMEOUT
net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dev_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
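
/* dut_mode is created from __hci_init() below in the per-controller
 * debugfs directory (typically /sys/kernel/debug/bluetooth/hciX).
 * Writing "1" puts a powered controller into Device Under Test mode;
 * writing "0" leaves it again by resetting the controller.
 */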

static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static int ssp_debug_mode_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;
	struct sk_buff *skb;
	__u8 mode;
	int err;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	hci_req_lock(hdev);
	mode = val;
	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
			     &mode, HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	hci_dev_lock(hdev);
	hdev->ssp_debug_mode = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->ssp_debug_mode;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
			ssp_debug_mode_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};
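
/* The idle timeout is in milliseconds: 0 disables it, anything else
 * must lie between 500 msec and one hour (3600000 msec).
 */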
static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");
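
/* Sniff intervals are in baseband slots of 0.625 msec; the setters
 * below reject odd slot counts and keep min <= max.
 */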
static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->le_white_list, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
	.open		= white_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   __le64_to_cpu(ltk->rand), 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
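
/* LE connection intervals are in units of 1.25 msec; 0x0006-0x0c80
 * maps to the 7.5 msec - 4 sec range allowed by the Core spec, and
 * min may never exceed max.
 */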
static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");

static ssize_t lowpan_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
			    size_t count, loff_t *position)
{
	struct hci_dev *hdev = fp->private_data;
	bool enable;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	if (strtobool(buf, &enable) < 0)
		return -EINVAL;

	if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

	return count;
}

static const struct file_operations lowpan_debugfs_fops = {
	.open		= simple_open,
	.read		= lowpan_read,
	.write		= lowpan_write,
	.llseek		= default_llseek,
};

static int le_auto_conn_show(struct seq_file *sf, void *ptr)
{
	struct hci_dev *hdev = sf->private;
	struct hci_conn_params *p;

	hci_dev_lock(hdev);

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(sf, "%pMR %u %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int le_auto_conn_open(struct inode *inode, struct file *file)
{
	return single_open(file, le_auto_conn_show, inode->i_private);
}

static ssize_t le_auto_conn_write(struct file *file, const char __user *data,
				  size_t count, loff_t *offset)
{
	struct seq_file *sf = file->private_data;
	struct hci_dev *hdev = sf->private;
	u8 auto_connect = 0;
	bdaddr_t addr;
	u8 addr_type;
	char *buf;
	int err = 0;
	int n;

	/* Don't allow partial write */
	if (*offset != 0)
		return -EINVAL;

	if (count < 3)
		return -EINVAL;

	buf = memdup_user(data, count);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	if (memcmp(buf, "add", 3) == 0) {
		n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu",
			   &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
			   &addr.b[1], &addr.b[0], &addr_type,
			   &auto_connect);

		if (n < 7) {
			err = -EINVAL;
			goto done;
		}

		hci_dev_lock(hdev);
		err = hci_conn_params_add(hdev, &addr, addr_type, auto_connect,
					  hdev->le_conn_min_interval,
					  hdev->le_conn_max_interval);
		hci_dev_unlock(hdev);

		if (err)
			goto done;
	} else if (memcmp(buf, "del", 3) == 0) {
		n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
			   &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
			   &addr.b[1], &addr.b[0], &addr_type);

		if (n < 7) {
			err = -EINVAL;
			goto done;
		}

		hci_dev_lock(hdev);
		hci_conn_params_del(hdev, &addr, addr_type);
		hci_dev_unlock(hdev);
	} else if (memcmp(buf, "clr", 3) == 0) {
		hci_dev_lock(hdev);
		hci_conn_params_clear(hdev);
		hci_pend_le_conns_clear(hdev);
		hci_update_background_scan(hdev);
		hci_dev_unlock(hdev);
	} else {
		err = -EINVAL;
	}

done:
	kfree(buf);

	if (err)
		return err;
	else
		return count;
}

static const struct file_operations le_auto_conn_fops = {
	.open		= le_auto_conn_open,
	.read		= seq_read,
	.write		= le_auto_conn_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};
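
/* Writes to le_auto_conn take "add <bdaddr> <type> [auto_connect]",
 * "del <bdaddr> <type>" or "clr", with the address in the usual
 * MSB-first colon notation. Illustrative shell usage:
 *
 *	echo "add 00:11:22:33:44:55 0 1" > le_auto_conn
 */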

/* ---- HCI requests ---- */
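
/* Both handlers below record the result of a pending synchronous
 * request and wake up whoever sleeps on req_wait_q, i.e. the callers
 * of __hci_req_sync() and __hci_cmd_sync_ev().
 */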
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
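
/* Take the last received event out of hdev->recv_evt and return it if
 * it matches what the caller asked for: the event in @event, or the
 * Command Complete for @opcode when @event is 0. Anything else is
 * freed and reported as -ENODATA.
 */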
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
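
/* Illustrative use of __hci_cmd_sync() (a sketch, not part of this
 * file): a driver setup routine could fetch the controller address
 * synchronously with
 *
 *	struct hci_rp_read_bd_addr *rp;
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *			     HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	rp = (struct hci_rp_read_bd_addr *)skb->data;
 *	if (rp->status == 0)
 *		bacpy(&hdev->bdaddr, &rp->bdaddr);
 *	kfree_skb(skb);
 */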

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
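
/* The request builders below only queue HCI commands on the passed
 * struct hci_request; they run synchronously by being handed to
 * hci_req_sync() or __hci_req_sync() above.
 */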
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev))
		hci_set_le_support(req);

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
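
/* Stage the controller init: stage 1 runs for every controller type,
 * AMP controllers stop there, and BR/EDR/LE controllers continue
 * through stages 2-4. The debugfs entries are only created once,
 * while HCI_SETUP is still set.
 */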
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
				    hdev, &ssp_debug_mode_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_file("identity", 0400, hdev->debugfs,
				    hdev, &identity_fops);
		debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
				    hdev, &rpa_timeout_fops);
		debugfs_create_file("random_address", 0444, hdev->debugfs,
				    hdev, &random_address_fops);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
				    &white_list_fops);
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
				    hdev, &adv_channel_map_fops);
		debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
				    &lowpan_debugfs_fops);
		debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev,
				    &le_auto_conn_fops);
	}

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
a3d4e20a 2002void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
04124681 2003 struct inquiry_entry *ie)
a3d4e20a
JH
2004{
2005 struct discovery_state *cache = &hdev->discovery;
2006 struct list_head *pos = &cache->resolve;
2007 struct inquiry_entry *p;
2008
2009 list_del(&ie->list);
2010
2011 list_for_each_entry(p, &cache->resolve, list) {
2012 if (p->name_state != NAME_PENDING &&
a8c5fb1a 2013 abs(p->data.rssi) >= abs(ie->data.rssi))
a3d4e20a
JH
2014 break;
2015 pos = &p->list;
2016 }
2017
2018 list_add(&ie->list, pos);
2019}
2020
3175405b 2021bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
04124681 2022 bool name_known, bool *ssp)
1da177e4 2023{
30883512 2024 struct discovery_state *cache = &hdev->discovery;
70f23020 2025 struct inquiry_entry *ie;
1da177e4 2026
6ed93dc6 2027 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1da177e4 2028
2b2fec4d
SJ
2029 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2030
01735bbd 2031 *ssp = data->ssp_mode;
388fc8fa 2032
70f23020 2033 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a 2034 if (ie) {
8002d77c 2035 if (ie->data.ssp_mode)
388fc8fa
JH
2036 *ssp = true;
2037
a3d4e20a 2038 if (ie->name_state == NAME_NEEDED &&
a8c5fb1a 2039 data->rssi != ie->data.rssi) {
a3d4e20a
JH
2040 ie->data.rssi = data->rssi;
2041 hci_inquiry_cache_update_resolve(hdev, ie);
2042 }
2043
561aafbc 2044 goto update;
a3d4e20a 2045 }
561aafbc
JH
2046
2047 /* Entry not in the cache. Add new one. */
2048 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2049 if (!ie)
3175405b 2050 return false;
561aafbc
JH
2051
2052 list_add(&ie->all, &cache->all);
2053
2054 if (name_known) {
2055 ie->name_state = NAME_KNOWN;
2056 } else {
2057 ie->name_state = NAME_NOT_KNOWN;
2058 list_add(&ie->list, &cache->unknown);
2059 }
70f23020 2060
561aafbc
JH
2061update:
2062 if (name_known && ie->name_state != NAME_KNOWN &&
a8c5fb1a 2063 ie->name_state != NAME_PENDING) {
561aafbc
JH
2064 ie->name_state = NAME_KNOWN;
2065 list_del(&ie->list);
1da177e4
LT
2066 }
2067
70f23020
AE
2068 memcpy(&ie->data, data, sizeof(*data));
2069 ie->timestamp = jiffies;
1da177e4 2070 cache->timestamp = jiffies;
3175405b
JH
2071
2072 if (ie->name_state == NAME_NOT_KNOWN)
2073 return false;
2074
2075 return true;
1da177e4
LT
2076}
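
/* Illustrative sketch (an assumption, modelled on the inquiry result
 * event handlers in hci_event.c, not code from this file): the bool
 * returned above feeds the name_known decision reported to mgmt, and
 * entries left in NAME_NOT_KNOWN state stay queued on cache->unknown
 * for a later remote name request:
 *
 *	bool name_known, ssp;
 *
 *	hci_dev_lock(hdev);
 *	name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
 *	hci_dev_unlock(hdev);
 */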

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses we will use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
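
/* Illustrative userspace sketch (an assumption, not part of this
 * file; hci_socket_fd is a hypothetical raw HCI socket descriptor):
 * hci_inquiry() above is reached through the HCIINQUIRY ioctl, with
 * the request header immediately followed by room for the copied-back
 * inquiry_info entries. Note that ir.length is in inquiry-length
 * units and the kernel waits 2000 ms per unit for completion:
 *
 *	char buf[sizeof(struct hci_inquiry_req) +
 *		 255 * sizeof(struct inquiry_info)] = { 0 };
 *	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) buf;
 *
 *	ir->dev_id  = 0;
 *	ir->flags   = IREQ_CACHE_FLUSH;
 *	ir->lap[0]  = 0x33;	// GIAC 0x9e8b33
 *	ir->lap[1]  = 0x8b;
 *	ir->lap[2]  = 0x9e;
 *	ir->length  = 8;
 *	ir->num_rsp = 255;
 *
 *	ioctl(hci_socket_fd, HCIINQUIRY, (unsigned long) ir);
 */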

static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for a valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_pend_le_conns_clear(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
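
/* Illustrative userspace sketch (an assumption, not part of this
 * file; hci_socket_fd is a hypothetical raw HCI socket descriptor):
 * the HCISETSCAN case above ends up in hci_scan_req() via
 * hci_req_sync(). Enabling both page and inquiry scan would look
 * like:
 *
 *	struct hci_dev_req dr;
 *
 *	dr.dev_id  = 0;
 *	dr.dev_opt = SCAN_PAGE | SCAN_INQUIRY;
 *	ioctl(hci_socket_fd, HCISETSCAN, (unsigned long) &dr);
 */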

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		list_del(&k->list);
		kfree(k);
	}
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

static bool ltk_type_master(u8 type)
{
	if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
		return true;

	return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
			     bool master)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv || k->rand != rand)
			continue;

		if (ltk_type_master(k->type) != master)
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type, bool master)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0 &&
		    ltk_type_master(k->type) == master)
			return k;

	return NULL;
}

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa))
			return irk;
	}

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			return irk;
		}
	}

	return NULL;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0)
			return irk;
	}

	return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
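
/* Illustrative sketch (an assumption, modelled on the Link Key
 * Notification event handling in hci_event.c, not code from this
 * file): keys delivered by the controller are stored under
 * hdev->lock, and new_key=1 lets hci_add_link_key() decide, via
 * hci_persistent_key(), whether userspace should keep the key:
 *
 *	hci_dev_lock(hdev);
 *	if (test_bit(HCI_MGMT, &hdev->dev_flags))
 *		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
 *				 ev->key_type, pin_len);
 *	hci_dev_unlock(hdev);
 */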

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	bool master = ltk_type_master(type);

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k, *tmp;
	int removed = 0;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 *hash, u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));

	memset(data->hash256, 0, sizeof(data->hash256));
	memset(data->randomizer256, 0, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
				u8 *hash192, u8 *randomizer192,
				u8 *hash256, u8 *randomizer256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash192, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));

	memcpy(data->hash256, hash256, sizeof(data->hash256));
	memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
					 bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

static void hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_blacklist_clear(hdev);
		return 0;
	}

	entry = hci_blacklist_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}

struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->le_white_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

void hci_white_list_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->le_white_list) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}
}

int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->le_white_list);

	return 0;
}

int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	entry = hci_white_list_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
			return params;
		}
	}

	return NULL;
}

static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
	if (!conn)
		return false;

	if (conn->dst_type != type)
		return false;

	if (conn->state != BT_CONNECTED)
		return false;

	return true;
}

static bool is_identity_address(bdaddr_t *addr, u8 addr_type)
{
	if (addr_type == ADDR_LE_DEV_PUBLIC)
		return true;

	/* Check for Random Static address type */
	if ((addr->b[5] & 0xc0) == 0xc0)
		return true;

	return false;
}

/* This function requires the caller holds hdev->lock */
int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
			u8 auto_connect, u16 conn_min_interval,
			u16 conn_max_interval)
{
	struct hci_conn_params *params;

	if (!is_identity_address(addr, addr_type))
		return -EINVAL;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		goto update;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return -ENOMEM;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);

update:
	params->conn_min_interval = conn_min_interval;
	params->conn_max_interval = conn_max_interval;
	params->auto_connect = auto_connect;

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		hci_pend_le_conn_del(hdev, addr, addr_type);
		break;
	case HCI_AUTO_CONN_ALWAYS:
		if (!is_connected(hdev, addr, addr_type))
			hci_pend_le_conn_add(hdev, addr, addr_type);
		break;
	}

	BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x "
	       "conn_max_interval 0x%.4x", addr, addr_type, auto_connect,
	       conn_min_interval, conn_max_interval);

	return 0;
}
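
/* Illustrative sketch (not from the original file): registering a
 * device for automatic LE reconnection; with HCI_AUTO_CONN_ALWAYS
 * the helper above also queues the address on hdev->pend_le_conns
 * so the background scan can pick it up:
 *
 *	hci_dev_lock(hdev);
 *	hci_conn_params_add(hdev, &addr, ADDR_LE_DEV_PUBLIC,
 *			    HCI_AUTO_CONN_ALWAYS,
 *			    hdev->le_conn_min_interval,
 *			    hdev->le_conn_max_interval);
 *	hci_dev_unlock(hdev);
 */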

/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_pend_le_conn_del(hdev, addr, addr_type);

	list_del(&params->list);
	kfree(params);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE connection parameters were removed");
}

/* This function requires the caller holds hdev->lock */
struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct bdaddr_list *entry;

	list_for_each_entry(entry, &hdev->pend_le_conns, list) {
		if (bacmp(&entry->bdaddr, addr) == 0 &&
		    entry->bdaddr_type == addr_type)
			return entry;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct bdaddr_list *entry;

	entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
	if (entry)
		goto done;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		BT_ERR("Out of memory");
		return;
	}

	bacpy(&entry->bdaddr, addr);
	entry->bdaddr_type = addr_type;

	list_add(&entry->list, &hdev->pend_le_conns);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

done:
	hci_update_background_scan(hdev);
}

/* This function requires the caller holds hdev->lock */
void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct bdaddr_list *entry;

	entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
	if (!entry)
		goto done;

	list_del(&entry->list);
	kfree(entry);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

done:
	hci_update_background_scan(hdev);
}

/* This function requires the caller holds hdev->lock */
void hci_pend_le_conns_clear(struct hci_dev *hdev)
{
	struct bdaddr_list *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	BT_DBG("All LE pending connections cleared");
}

static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
	    hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
		BT_DBG("Deferring random address update");
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * the current RPA has expired or something other than the
	 * current RPA is in use, then generate a new one.
	 */
	if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use an unresolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t urpa;

		get_random_bytes(&urpa, 6);
		urpa.b[5] &= 0x3f;	/* Clear two most significant bits */

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &urpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 */
	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
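
/* Illustrative sketch (an assumption, modelled on the advertising and
 * scanning setup paths): callers build an HCI request, let the helper
 * above queue any needed LE Set Random Address command and report the
 * own-address type, and only then append the command that consumes it:
 *
 *	struct hci_request req;
 *	struct hci_cp_le_set_scan_param cp;
 *	u8 own_addr_type;
 *
 *	hci_req_init(&req, hdev);
 *	memset(&cp, 0, sizeof(cp));
 *	if (hci_update_random_address(&req, false, &own_addr_type) < 0)
 *		return;
 *	cp.own_address_type = own_addr_type;
 *	hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
 *	hci_req_run(&req, NULL);
 */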

/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}
3760
9be0dab7
DH
3761/* Alloc HCI device */
3762struct hci_dev *hci_alloc_dev(void)
3763{
3764 struct hci_dev *hdev;
3765
3766 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3767 if (!hdev)
3768 return NULL;
3769
b1b813d4
DH
3770 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3771 hdev->esco_type = (ESCO_HV1);
3772 hdev->link_mode = (HCI_LM_ACCEPT);
b4cb9fb2
MH
3773 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3774 hdev->io_capability = 0x03; /* No Input No Output */
bbaf444a
JH
3775 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3776 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
b1b813d4 3777
b1b813d4
DH
3778 hdev->sniff_max_interval = 800;
3779 hdev->sniff_min_interval = 80;
3780
3f959d46 3781 hdev->le_adv_channel_map = 0x07;
bef64738
MH
3782 hdev->le_scan_interval = 0x0060;
3783 hdev->le_scan_window = 0x0030;
4e70c7e7
MH
3784 hdev->le_conn_min_interval = 0x0028;
3785 hdev->le_conn_max_interval = 0x0038;
bef64738 3786
d6bfd59c
JH
3787 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3788
b1b813d4
DH
3789 mutex_init(&hdev->lock);
3790 mutex_init(&hdev->req_lock);
3791
3792 INIT_LIST_HEAD(&hdev->mgmt_pending);
3793 INIT_LIST_HEAD(&hdev->blacklist);
3794 INIT_LIST_HEAD(&hdev->uuids);
3795 INIT_LIST_HEAD(&hdev->link_keys);
3796 INIT_LIST_HEAD(&hdev->long_term_keys);
970c4e46 3797 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
b1b813d4 3798 INIT_LIST_HEAD(&hdev->remote_oob_data);
d2ab0ac1 3799 INIT_LIST_HEAD(&hdev->le_white_list);
15819a70 3800 INIT_LIST_HEAD(&hdev->le_conn_params);
77a77a30 3801 INIT_LIST_HEAD(&hdev->pend_le_conns);
6b536b5e 3802 INIT_LIST_HEAD(&hdev->conn_hash.list);
b1b813d4
DH
3803
3804 INIT_WORK(&hdev->rx_work, hci_rx_work);
3805 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3806 INIT_WORK(&hdev->tx_work, hci_tx_work);
3807 INIT_WORK(&hdev->power_on, hci_power_on);
b1b813d4 3808
b1b813d4
DH
3809 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3810 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3811 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3812
b1b813d4
DH
3813 skb_queue_head_init(&hdev->rx_q);
3814 skb_queue_head_init(&hdev->cmd_q);
3815 skb_queue_head_init(&hdev->raw_q);
3816
3817 init_waitqueue_head(&hdev->req_wait_q);
3818
bda4f23a 3819 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
b1b813d4 3820
b1b813d4
DH
3821 hci_init_sysfs(hdev);
3822 discovery_init(hdev);
9be0dab7
DH
3823
3824 return hdev;
3825}
3826EXPORT_SYMBOL(hci_alloc_dev);
3827
3828/* Free HCI device */
3829void hci_free_dev(struct hci_dev *hdev)
3830{
9be0dab7
DH
3831	/* will be freed via the device release callback */
3832 put_device(&hdev->dev);
3833}
3834EXPORT_SYMBOL(hci_free_dev);
3835
1da177e4
LT
3836/* Register HCI device */
3837int hci_register_dev(struct hci_dev *hdev)
3838{
b1b813d4 3839 int id, error;
1da177e4 3840
010666a1 3841 if (!hdev->open || !hdev->close)
1da177e4
LT
3842 return -EINVAL;
3843
08add513
MM
3844 /* Do not allow HCI_AMP devices to register at index 0,
3845 * so the index can be used as the AMP controller ID.
3846 */
3df92b31
SL
3847 switch (hdev->dev_type) {
3848 case HCI_BREDR:
3849 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3850 break;
3851 case HCI_AMP:
3852 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3853 break;
3854 default:
3855 return -EINVAL;
1da177e4 3856 }
8e87d142 3857
3df92b31
SL
3858 if (id < 0)
3859 return id;
3860
1da177e4
LT
3861 sprintf(hdev->name, "hci%d", id);
3862 hdev->id = id;
2d8b3a11
AE
3863
3864 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3865
d8537548
KC
3866 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3867 WQ_MEM_RECLAIM, 1, hdev->name);
33ca954d
DH
3868 if (!hdev->workqueue) {
3869 error = -ENOMEM;
3870 goto err;
3871 }
f48fd9c8 3872
d8537548
KC
3873 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3874 WQ_MEM_RECLAIM, 1, hdev->name);
6ead1bbc
JH
3875 if (!hdev->req_workqueue) {
3876 destroy_workqueue(hdev->workqueue);
3877 error = -ENOMEM;
3878 goto err;
3879 }
3880
0153e2ec
MH
3881 if (!IS_ERR_OR_NULL(bt_debugfs))
3882 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3883
bdc3e0f1
MH
3884 dev_set_name(&hdev->dev, "%s", hdev->name);
3885
99780a7b
JH
3886 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3887 CRYPTO_ALG_ASYNC);
3888 if (IS_ERR(hdev->tfm_aes)) {
3889 BT_ERR("Unable to create crypto context");
3890 error = PTR_ERR(hdev->tfm_aes);
3891 hdev->tfm_aes = NULL;
3892 goto err_wqueue;
3893 }
3894
bdc3e0f1 3895 error = device_add(&hdev->dev);
33ca954d 3896 if (error < 0)
99780a7b 3897 goto err_tfm;
1da177e4 3898
611b30f7 3899 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
3900 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3901 hdev);
611b30f7
MH
3902 if (hdev->rfkill) {
3903 if (rfkill_register(hdev->rfkill) < 0) {
3904 rfkill_destroy(hdev->rfkill);
3905 hdev->rfkill = NULL;
3906 }
3907 }
3908
5e130367
JH
3909 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3910 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3911
a8b2d5c2 3912 set_bit(HCI_SETUP, &hdev->dev_flags);
004b0258 3913 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
ce2be9ac 3914
01cd3404 3915 if (hdev->dev_type == HCI_BREDR) {
56f87901
JH
3916 /* Assume BR/EDR support until proven otherwise (such as
3917 * through reading supported features during init.
3918 */
3919 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3920 }
ce2be9ac 3921
fcee3377
GP
3922 write_lock(&hci_dev_list_lock);
3923 list_add(&hdev->list, &hci_dev_list);
3924 write_unlock(&hci_dev_list_lock);
3925
1da177e4 3926 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 3927 hci_dev_hold(hdev);
1da177e4 3928
19202573 3929 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 3930
1da177e4 3931 return id;
f48fd9c8 3932
99780a7b
JH
3933err_tfm:
3934 crypto_free_blkcipher(hdev->tfm_aes);
33ca954d
DH
3935err_wqueue:
3936 destroy_workqueue(hdev->workqueue);
6ead1bbc 3937 destroy_workqueue(hdev->req_workqueue);
33ca954d 3938err:
3df92b31 3939 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 3940
33ca954d 3941 return error;
1da177e4
LT
3942}
3943EXPORT_SYMBOL(hci_register_dev);
3944
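/* Illustrative sketch (not part of the original file): the usual driver
 * pattern around hci_alloc_dev()/hci_register_dev(). The example_*
 * callbacks and the HCI_USB bus type are assumptions standing in for a
 * real transport driver.
 */
static int example_open(struct hci_dev *hdev)
{
	return 0;
}

static int example_close(struct hci_dev *hdev)
{
	return 0;
}

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* A real driver would hand the skb to its transport here */
	kfree_skb(skb);
	return 0;
}

static int example_driver_setup(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_USB;
	hdev->open = example_open;
	hdev->close = example_close;
	hdev->send = example_send;

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return err;
	}

	return 0;
}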
3945/* Unregister HCI device */
59735631 3946void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 3947{
3df92b31 3948 int i, id;
ef222013 3949
c13854ce 3950 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 3951
94324962
JH
3952 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3953
3df92b31
SL
3954 id = hdev->id;
3955
f20d09d5 3956 write_lock(&hci_dev_list_lock);
1da177e4 3957 list_del(&hdev->list);
f20d09d5 3958 write_unlock(&hci_dev_list_lock);
1da177e4
LT
3959
3960 hci_dev_do_close(hdev);
3961
cd4c5391 3962 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
3963 kfree_skb(hdev->reassembly[i]);
3964
b9b5ef18
GP
3965 cancel_work_sync(&hdev->power_on);
3966
ab81cbf9 3967 if (!test_bit(HCI_INIT, &hdev->flags) &&
a8c5fb1a 3968 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
09fd0de5 3969 hci_dev_lock(hdev);
744cf19e 3970 mgmt_index_removed(hdev);
09fd0de5 3971 hci_dev_unlock(hdev);
56e5cb86 3972 }
ab81cbf9 3973
2e58ef3e
JH
3974 /* mgmt_index_removed should take care of emptying the
3975 * pending list */
3976 BUG_ON(!list_empty(&hdev->mgmt_pending));
3977
1da177e4
LT
3978 hci_notify(hdev, HCI_DEV_UNREG);
3979
611b30f7
MH
3980 if (hdev->rfkill) {
3981 rfkill_unregister(hdev->rfkill);
3982 rfkill_destroy(hdev->rfkill);
3983 }
3984
99780a7b
JH
3985 if (hdev->tfm_aes)
3986 crypto_free_blkcipher(hdev->tfm_aes);
3987
bdc3e0f1 3988 device_del(&hdev->dev);
147e2d59 3989
0153e2ec
MH
3990 debugfs_remove_recursive(hdev->debugfs);
3991
f48fd9c8 3992 destroy_workqueue(hdev->workqueue);
6ead1bbc 3993 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 3994
09fd0de5 3995 hci_dev_lock(hdev);
e2e0cacb 3996 hci_blacklist_clear(hdev);
2aeb9a1a 3997 hci_uuids_clear(hdev);
55ed8ca1 3998 hci_link_keys_clear(hdev);
b899efaf 3999 hci_smp_ltks_clear(hdev);
970c4e46 4000 hci_smp_irks_clear(hdev);
2763eda6 4001 hci_remote_oob_data_clear(hdev);
d2ab0ac1 4002 hci_white_list_clear(hdev);
15819a70 4003 hci_conn_params_clear(hdev);
77a77a30 4004 hci_pend_le_conns_clear(hdev);
09fd0de5 4005 hci_dev_unlock(hdev);
e2e0cacb 4006
dc946bd8 4007 hci_dev_put(hdev);
3df92b31
SL
4008
4009 ida_simple_remove(&hci_index_ida, id);
1da177e4
LT
4010}
4011EXPORT_SYMBOL(hci_unregister_dev);
4012
4013/* Suspend HCI device */
4014int hci_suspend_dev(struct hci_dev *hdev)
4015{
4016 hci_notify(hdev, HCI_DEV_SUSPEND);
4017 return 0;
4018}
4019EXPORT_SYMBOL(hci_suspend_dev);
4020
4021/* Resume HCI device */
4022int hci_resume_dev(struct hci_dev *hdev)
4023{
4024 hci_notify(hdev, HCI_DEV_RESUME);
4025 return 0;
4026}
4027EXPORT_SYMBOL(hci_resume_dev);
4028
76bca880 4029/* Receive frame from HCI drivers */
e1a26170 4030int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 4031{
76bca880 4032 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 4033 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
4034 kfree_skb(skb);
4035 return -ENXIO;
4036 }
4037
d82603c6 4038 /* Incoming skb */
76bca880
MH
4039 bt_cb(skb)->incoming = 1;
4040
4041 /* Time stamp */
4042 __net_timestamp(skb);
4043
76bca880 4044 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 4045 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 4046
76bca880
MH
4047 return 0;
4048}
4049EXPORT_SYMBOL(hci_recv_frame);
4050
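/* Illustrative sketch (not part of the original file): how a transport
 * driver hands a fully framed packet to the core. The buffer handling
 * and the function name are assumptions.
 */
static int example_deliver(struct hci_dev *hdev, const void *data,
			   int len, u8 pkt_type)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), data, len);
	bt_cb(skb)->pkt_type = pkt_type;	/* e.g. HCI_EVENT_PKT */

	/* hci_recv_frame() takes ownership of the skb in all cases */
	return hci_recv_frame(hdev, skb);
}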
33e882a5 4051static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 4052 int count, __u8 index)
33e882a5
SS
4053{
4054 int len = 0;
4055 int hlen = 0;
4056 int remain = count;
4057 struct sk_buff *skb;
4058 struct bt_skb_cb *scb;
4059
4060 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 4061 index >= NUM_REASSEMBLY)
33e882a5
SS
4062 return -EILSEQ;
4063
4064 skb = hdev->reassembly[index];
4065
4066 if (!skb) {
4067 switch (type) {
4068 case HCI_ACLDATA_PKT:
4069 len = HCI_MAX_FRAME_SIZE;
4070 hlen = HCI_ACL_HDR_SIZE;
4071 break;
4072 case HCI_EVENT_PKT:
4073 len = HCI_MAX_EVENT_SIZE;
4074 hlen = HCI_EVENT_HDR_SIZE;
4075 break;
4076 case HCI_SCODATA_PKT:
4077 len = HCI_MAX_SCO_SIZE;
4078 hlen = HCI_SCO_HDR_SIZE;
4079 break;
4080 }
4081
1e429f38 4082 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
4083 if (!skb)
4084 return -ENOMEM;
4085
4086 scb = (void *) skb->cb;
4087 scb->expect = hlen;
4088 scb->pkt_type = type;
4089
33e882a5
SS
4090 hdev->reassembly[index] = skb;
4091 }
4092
4093 while (count) {
4094 scb = (void *) skb->cb;
89bb46d0 4095 len = min_t(uint, scb->expect, count);
33e882a5
SS
4096
4097 memcpy(skb_put(skb, len), data, len);
4098
4099 count -= len;
4100 data += len;
4101 scb->expect -= len;
4102 remain = count;
4103
4104 switch (type) {
4105 case HCI_EVENT_PKT:
4106 if (skb->len == HCI_EVENT_HDR_SIZE) {
4107 struct hci_event_hdr *h = hci_event_hdr(skb);
4108 scb->expect = h->plen;
4109
4110 if (skb_tailroom(skb) < scb->expect) {
4111 kfree_skb(skb);
4112 hdev->reassembly[index] = NULL;
4113 return -ENOMEM;
4114 }
4115 }
4116 break;
4117
4118 case HCI_ACLDATA_PKT:
4119 if (skb->len == HCI_ACL_HDR_SIZE) {
4120 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4121 scb->expect = __le16_to_cpu(h->dlen);
4122
4123 if (skb_tailroom(skb) < scb->expect) {
4124 kfree_skb(skb);
4125 hdev->reassembly[index] = NULL;
4126 return -ENOMEM;
4127 }
4128 }
4129 break;
4130
4131 case HCI_SCODATA_PKT:
4132 if (skb->len == HCI_SCO_HDR_SIZE) {
4133 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4134 scb->expect = h->dlen;
4135
4136 if (skb_tailroom(skb) < scb->expect) {
4137 kfree_skb(skb);
4138 hdev->reassembly[index] = NULL;
4139 return -ENOMEM;
4140 }
4141 }
4142 break;
4143 }
4144
4145 if (scb->expect == 0) {
4146 /* Complete frame */
4147
4148 bt_cb(skb)->pkt_type = type;
e1a26170 4149 hci_recv_frame(hdev, skb);
33e882a5
SS
4150
4151 hdev->reassembly[index] = NULL;
4152 return remain;
4153 }
4154 }
4155
4156 return remain;
4157}
4158
ef222013
MH
4159int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4160{
f39a3c06
SS
4161 int rem = 0;
4162
ef222013
MH
4163 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4164 return -EILSEQ;
4165
da5f6c37 4166 while (count) {
1e429f38 4167 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
4168 if (rem < 0)
4169 return rem;
ef222013 4170
f39a3c06
SS
4171 data += (count - rem);
4172 count = rem;
f81c6224 4173 }
ef222013 4174
f39a3c06 4175 return rem;
ef222013
MH
4176}
4177EXPORT_SYMBOL(hci_recv_fragment);
4178
99811510
SS
4179#define STREAM_REASSEMBLY 0
4180
4181int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4182{
4183 int type;
4184 int rem = 0;
4185
da5f6c37 4186 while (count) {
99811510
SS
4187 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4188
4189 if (!skb) {
4190 struct { char type; } *pkt;
4191
4192 /* Start of the frame */
4193 pkt = data;
4194 type = pkt->type;
4195
4196 data++;
4197 count--;
4198 } else
4199 type = bt_cb(skb)->pkt_type;
4200
1e429f38 4201 rem = hci_reassembly(hdev, type, data, count,
a8c5fb1a 4202 STREAM_REASSEMBLY);
99811510
SS
4203 if (rem < 0)
4204 return rem;
4205
4206 data += (count - rem);
4207 count = rem;
f81c6224 4208 }
99811510
SS
4209
4210 return rem;
4211}
4212EXPORT_SYMBOL(hci_recv_stream_fragment);
4213
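/* Illustrative sketch (not part of the original file): a UART-style
 * driver feeding an unframed byte stream to the core; the packet type
 * byte is extracted by hci_recv_stream_fragment() itself.
 */
static void example_uart_rx(struct hci_dev *hdev, void *buf, int count)
{
	int err;

	err = hci_recv_stream_fragment(hdev, buf, count);
	if (err < 0)
		BT_ERR("%s stream reassembly failed: %d", hdev->name, err);
}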
1da177e4
LT
4214/* ---- Interface to upper protocols ---- */
4215
1da177e4
LT
4216int hci_register_cb(struct hci_cb *cb)
4217{
4218 BT_DBG("%p name %s", cb, cb->name);
4219
f20d09d5 4220 write_lock(&hci_cb_list_lock);
1da177e4 4221 list_add(&cb->list, &hci_cb_list);
f20d09d5 4222 write_unlock(&hci_cb_list_lock);
1da177e4
LT
4223
4224 return 0;
4225}
4226EXPORT_SYMBOL(hci_register_cb);
4227
4228int hci_unregister_cb(struct hci_cb *cb)
4229{
4230 BT_DBG("%p name %s", cb, cb->name);
4231
f20d09d5 4232 write_lock(&hci_cb_list_lock);
1da177e4 4233 list_del(&cb->list);
f20d09d5 4234 write_unlock(&hci_cb_list_lock);
1da177e4
LT
4235
4236 return 0;
4237}
4238EXPORT_SYMBOL(hci_unregister_cb);
4239
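/* Illustrative sketch (not part of the original file): a minimal hci_cb
 * user, modelled on how upper protocols register for security events.
 * The example_* names are hypothetical.
 */
static void example_security_cfm(struct hci_conn *conn, __u8 status,
				 __u8 encrypt)
{
	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
}

static struct hci_cb example_cb = {
	.name		= "example",
	.security_cfm	= example_security_cfm,
};

/* A module would call hci_register_cb(&example_cb) on init and
 * hci_unregister_cb(&example_cb) on exit.
 */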
51086991 4240static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4 4241{
0d48d939 4242 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 4243
cd82e61c
MH
4244 /* Time stamp */
4245 __net_timestamp(skb);
1da177e4 4246
cd82e61c
MH
4247 /* Send copy to monitor */
4248 hci_send_to_monitor(hdev, skb);
4249
4250 if (atomic_read(&hdev->promisc)) {
4251 /* Send copy to the sockets */
470fe1b5 4252 hci_send_to_sock(hdev, skb);
1da177e4
LT
4253 }
4254
4255 /* Get rid of skb owner, prior to sending to the driver. */
4256 skb_orphan(skb);
4257
7bd8f09f 4258 if (hdev->send(hdev, skb) < 0)
51086991 4259 BT_ERR("%s sending frame failed", hdev->name);
1da177e4
LT
4260}
4261
3119ae95
JH
4262void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4263{
4264 skb_queue_head_init(&req->cmd_q);
4265 req->hdev = hdev;
5d73e034 4266 req->err = 0;
3119ae95
JH
4267}
4268
4269int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4270{
4271 struct hci_dev *hdev = req->hdev;
4272 struct sk_buff *skb;
4273 unsigned long flags;
4274
4275 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4276
5d73e034
AG
4277	/* If an error occurred during request building, remove all HCI
4278 * commands queued on the HCI request queue.
4279 */
4280 if (req->err) {
4281 skb_queue_purge(&req->cmd_q);
4282 return req->err;
4283 }
4284
3119ae95
JH
4285 /* Do not allow empty requests */
4286 if (skb_queue_empty(&req->cmd_q))
382b0c39 4287 return -ENODATA;
3119ae95
JH
4288
4289 skb = skb_peek_tail(&req->cmd_q);
4290 bt_cb(skb)->req.complete = complete;
4291
4292 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4293 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4294 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4295
4296 queue_work(hdev->workqueue, &hdev->cmd_work);
4297
4298 return 0;
4299}
4300
1ca3a9d0 4301static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
07dc93dd 4302 u32 plen, const void *param)
1da177e4
LT
4303{
4304 int len = HCI_COMMAND_HDR_SIZE + plen;
4305 struct hci_command_hdr *hdr;
4306 struct sk_buff *skb;
4307
1da177e4 4308 skb = bt_skb_alloc(len, GFP_ATOMIC);
1ca3a9d0
JH
4309 if (!skb)
4310 return NULL;
1da177e4
LT
4311
4312 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 4313 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
4314 hdr->plen = plen;
4315
4316 if (plen)
4317 memcpy(skb_put(skb, plen), param, plen);
4318
4319 BT_DBG("skb len %d", skb->len);
4320
0d48d939 4321 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
c78ae283 4322
1ca3a9d0
JH
4323 return skb;
4324}
4325
4326/* Send HCI command */
07dc93dd
JH
4327int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4328 const void *param)
1ca3a9d0
JH
4329{
4330 struct sk_buff *skb;
4331
4332 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4333
4334 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4335 if (!skb) {
4336 BT_ERR("%s no memory for command", hdev->name);
4337 return -ENOMEM;
4338 }
4339
11714b3d
JH
4340	/* Stand-alone HCI commands must be flagged as
4341 * single-command requests.
4342 */
4343 bt_cb(skb)->req.start = true;
4344
1da177e4 4345 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 4346 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
4347
4348 return 0;
4349}
1da177e4 4350
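/* Illustrative sketch (not part of the original file): a stand-alone
 * command outside any request context; the reply is delivered through
 * the normal event path. The wrapper name is hypothetical.
 */
static int example_read_local_version(struct hci_dev *hdev)
{
	return hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}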
71c76a17 4351/* Queue a command to an asynchronous HCI request */
07dc93dd
JH
4352void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4353 const void *param, u8 event)
71c76a17
JH
4354{
4355 struct hci_dev *hdev = req->hdev;
4356 struct sk_buff *skb;
4357
4358 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4359
34739c1e
AG
4360	/* If an error occurred during request building, there is no point in
4361 * queueing the HCI command. We can simply return.
4362 */
4363 if (req->err)
4364 return;
4365
71c76a17
JH
4366 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4367 if (!skb) {
5d73e034
AG
4368 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4369 hdev->name, opcode);
4370 req->err = -ENOMEM;
e348fe6b 4371 return;
71c76a17
JH
4372 }
4373
4374 if (skb_queue_empty(&req->cmd_q))
4375 bt_cb(skb)->req.start = true;
4376
02350a72
JH
4377 bt_cb(skb)->req.event = event;
4378
71c76a17 4379 skb_queue_tail(&req->cmd_q, skb);
71c76a17
JH
4380}
4381
07dc93dd
JH
4382void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4383 const void *param)
02350a72
JH
4384{
4385 hci_req_add_ev(req, opcode, plen, param, 0);
4386}
4387
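/* Illustrative sketch (not part of the original file): queueing a
 * command whose completion is signalled by a specific event instead of
 * Command Complete. Pairing Inquiry with Inquiry Complete here is an
 * assumption for illustration; the GIAC LAP and length values follow
 * the Bluetooth specification.
 */
static void example_add_inquiry(struct hci_request *req)
{
	struct hci_cp_inquiry cp;

	memset(&cp, 0, sizeof(cp));
	cp.lap[0] = 0x33;	/* General Inquiry Access Code */
	cp.lap[1] = 0x8b;
	cp.lap[2] = 0x9e;
	cp.length = 0x08;	/* 8 * 1.28s = 10.24s */

	hci_req_add_ev(req, HCI_OP_INQUIRY, sizeof(cp), &cp,
		       HCI_EV_INQUIRY_COMPLETE);
}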
1da177e4 4388/* Get data from the previously sent command */
a9de9248 4389void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
4390{
4391 struct hci_command_hdr *hdr;
4392
4393 if (!hdev->sent_cmd)
4394 return NULL;
4395
4396 hdr = (void *) hdev->sent_cmd->data;
4397
a9de9248 4398 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
4399 return NULL;
4400
f0e09510 4401 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
4402
4403 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4404}
4405
4406/* Send ACL data */
4407static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4408{
4409 struct hci_acl_hdr *hdr;
4410 int len = skb->len;
4411
badff6d0
ACM
4412 skb_push(skb, HCI_ACL_HDR_SIZE);
4413 skb_reset_transport_header(skb);
9c70220b 4414 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
4415 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4416 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
4417}
4418
ee22be7e 4419static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 4420 struct sk_buff *skb, __u16 flags)
1da177e4 4421{
ee22be7e 4422 struct hci_conn *conn = chan->conn;
1da177e4
LT
4423 struct hci_dev *hdev = conn->hdev;
4424 struct sk_buff *list;
4425
087bfd99
GP
4426 skb->len = skb_headlen(skb);
4427 skb->data_len = 0;
4428
4429 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
4430
4431 switch (hdev->dev_type) {
4432 case HCI_BREDR:
4433 hci_add_acl_hdr(skb, conn->handle, flags);
4434 break;
4435 case HCI_AMP:
4436 hci_add_acl_hdr(skb, chan->handle, flags);
4437 break;
4438 default:
4439 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4440 return;
4441 }
087bfd99 4442
70f23020
AE
4443 list = skb_shinfo(skb)->frag_list;
4444 if (!list) {
1da177e4
LT
4445		/* Non-fragmented */
4446 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4447
73d80deb 4448 skb_queue_tail(queue, skb);
1da177e4
LT
4449 } else {
4450 /* Fragmented */
4451 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4452
4453 skb_shinfo(skb)->frag_list = NULL;
4454
4455 /* Queue all fragments atomically */
af3e6359 4456 spin_lock(&queue->lock);
1da177e4 4457
73d80deb 4458 __skb_queue_tail(queue, skb);
e702112f
AE
4459
4460 flags &= ~ACL_START;
4461 flags |= ACL_CONT;
1da177e4
LT
4462 do {
4463 skb = list; list = list->next;
8e87d142 4464
0d48d939 4465 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 4466 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
4467
4468 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4469
73d80deb 4470 __skb_queue_tail(queue, skb);
1da177e4
LT
4471 } while (list);
4472
af3e6359 4473 spin_unlock(&queue->lock);
1da177e4 4474 }
73d80deb
LAD
4475}
4476
4477void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4478{
ee22be7e 4479 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 4480
f0e09510 4481 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb 4482
ee22be7e 4483 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 4484
3eff45ea 4485 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4486}
1da177e4
LT
4487
4488/* Send SCO data */
0d861d8b 4489void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
4490{
4491 struct hci_dev *hdev = conn->hdev;
4492 struct hci_sco_hdr hdr;
4493
4494 BT_DBG("%s len %d", hdev->name, skb->len);
4495
aca3192c 4496 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
4497 hdr.dlen = skb->len;
4498
badff6d0
ACM
4499 skb_push(skb, HCI_SCO_HDR_SIZE);
4500 skb_reset_transport_header(skb);
9c70220b 4501 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 4502
0d48d939 4503 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 4504
1da177e4 4505 skb_queue_tail(&conn->data_q, skb);
3eff45ea 4506 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4507}
1da177e4
LT
4508
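/* Illustrative sketch (not part of the original file): queueing one SCO
 * frame; callers are expected to size the payload to the negotiated
 * MTU. The wrapper name is hypothetical.
 */
static void example_queue_sco(struct hci_conn *conn, const void *frame,
			      int len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_KERNEL);
	if (!skb)
		return;

	memcpy(skb_put(skb, len), frame, len);
	hci_send_sco(conn, skb);
}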
4509/* ---- HCI TX task (outgoing data) ---- */
4510
4511/* HCI Connection scheduler */
6039aa73
GP
4512static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4513 int *quote)
1da177e4
LT
4514{
4515 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4516 struct hci_conn *conn = NULL, *c;
abc5de8f 4517 unsigned int num = 0, min = ~0;
1da177e4 4518
8e87d142 4519 /* We don't have to lock device here. Connections are always
1da177e4 4520 * added and removed with TX task disabled. */
bf4c6325
GP
4521
4522 rcu_read_lock();
4523
4524 list_for_each_entry_rcu(c, &h->list, list) {
769be974 4525 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 4526 continue;
769be974
MH
4527
4528 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4529 continue;
4530
1da177e4
LT
4531 num++;
4532
4533 if (c->sent < min) {
4534 min = c->sent;
4535 conn = c;
4536 }
52087a79
LAD
4537
4538 if (hci_conn_num(hdev, type) == num)
4539 break;
1da177e4
LT
4540 }
4541
bf4c6325
GP
4542 rcu_read_unlock();
4543
1da177e4 4544 if (conn) {
6ed58ec5
VT
4545 int cnt, q;
4546
4547 switch (conn->type) {
4548 case ACL_LINK:
4549 cnt = hdev->acl_cnt;
4550 break;
4551 case SCO_LINK:
4552 case ESCO_LINK:
4553 cnt = hdev->sco_cnt;
4554 break;
4555 case LE_LINK:
4556 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4557 break;
4558 default:
4559 cnt = 0;
4560 BT_ERR("Unknown link type");
4561 }
4562
4563 q = cnt / num;
1da177e4
LT
4564 *quote = q ? q : 1;
4565 } else
4566 *quote = 0;
4567
4568 BT_DBG("conn %p quote %d", conn, *quote);
4569 return conn;
4570}
4571
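/* Worked example (illustrative, not part of the original file): with
 * hdev->acl_cnt == 8 free controller buffers and num == 3 eligible ACL
 * connections, q = 8 / 3 = 2, so the least-recently-served connection
 * may send up to 2 packets in this scheduling pass.
 */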
6039aa73 4572static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
4573{
4574 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4575 struct hci_conn *c;
1da177e4 4576
bae1f5d9 4577 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 4578
bf4c6325
GP
4579 rcu_read_lock();
4580
1da177e4 4581 /* Kill stalled connections */
bf4c6325 4582 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 4583 if (c->type == type && c->sent) {
6ed93dc6
AE
4584 BT_ERR("%s killing stalled connection %pMR",
4585 hdev->name, &c->dst);
bed71748 4586 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
4587 }
4588 }
bf4c6325
GP
4589
4590 rcu_read_unlock();
1da177e4
LT
4591}
4592
6039aa73
GP
4593static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4594 int *quote)
1da177e4 4595{
73d80deb
LAD
4596 struct hci_conn_hash *h = &hdev->conn_hash;
4597 struct hci_chan *chan = NULL;
abc5de8f 4598 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 4599 struct hci_conn *conn;
73d80deb
LAD
4600 int cnt, q, conn_num = 0;
4601
4602 BT_DBG("%s", hdev->name);
4603
bf4c6325
GP
4604 rcu_read_lock();
4605
4606 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
4607 struct hci_chan *tmp;
4608
4609 if (conn->type != type)
4610 continue;
4611
4612 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4613 continue;
4614
4615 conn_num++;
4616
8192edef 4617 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
4618 struct sk_buff *skb;
4619
4620 if (skb_queue_empty(&tmp->data_q))
4621 continue;
4622
4623 skb = skb_peek(&tmp->data_q);
4624 if (skb->priority < cur_prio)
4625 continue;
4626
4627 if (skb->priority > cur_prio) {
4628 num = 0;
4629 min = ~0;
4630 cur_prio = skb->priority;
4631 }
4632
4633 num++;
4634
4635 if (conn->sent < min) {
4636 min = conn->sent;
4637 chan = tmp;
4638 }
4639 }
4640
4641 if (hci_conn_num(hdev, type) == conn_num)
4642 break;
4643 }
4644
bf4c6325
GP
4645 rcu_read_unlock();
4646
73d80deb
LAD
4647 if (!chan)
4648 return NULL;
4649
4650 switch (chan->conn->type) {
4651 case ACL_LINK:
4652 cnt = hdev->acl_cnt;
4653 break;
bd1eb66b
AE
4654 case AMP_LINK:
4655 cnt = hdev->block_cnt;
4656 break;
73d80deb
LAD
4657 case SCO_LINK:
4658 case ESCO_LINK:
4659 cnt = hdev->sco_cnt;
4660 break;
4661 case LE_LINK:
4662 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4663 break;
4664 default:
4665 cnt = 0;
4666 BT_ERR("Unknown link type");
4667 }
4668
4669 q = cnt / num;
4670 *quote = q ? q : 1;
4671 BT_DBG("chan %p quote %d", chan, *quote);
4672 return chan;
4673}
4674
02b20f0b
LAD
4675static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4676{
4677 struct hci_conn_hash *h = &hdev->conn_hash;
4678 struct hci_conn *conn;
4679 int num = 0;
4680
4681 BT_DBG("%s", hdev->name);
4682
bf4c6325
GP
4683 rcu_read_lock();
4684
4685 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
4686 struct hci_chan *chan;
4687
4688 if (conn->type != type)
4689 continue;
4690
4691 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4692 continue;
4693
4694 num++;
4695
8192edef 4696 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
4697 struct sk_buff *skb;
4698
4699 if (chan->sent) {
4700 chan->sent = 0;
4701 continue;
4702 }
4703
4704 if (skb_queue_empty(&chan->data_q))
4705 continue;
4706
4707 skb = skb_peek(&chan->data_q);
4708 if (skb->priority >= HCI_PRIO_MAX - 1)
4709 continue;
4710
4711 skb->priority = HCI_PRIO_MAX - 1;
4712
4713 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 4714 skb->priority);
02b20f0b
LAD
4715 }
4716
4717 if (hci_conn_num(hdev, type) == num)
4718 break;
4719 }
bf4c6325
GP
4720
4721 rcu_read_unlock();
4722
02b20f0b
LAD
4723}
4724
b71d385a
AE
4725static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4726{
4727 /* Calculate count of blocks used by this packet */
4728 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4729}
4730
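/* Worked example (illustrative, not part of the original file): an ACL
 * packet with skb->len == 676 carries 676 - 4 = 672 data bytes; with
 * hdev->block_len == 64 that is DIV_ROUND_UP(672, 64) = 11 blocks.
 */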
6039aa73 4731static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 4732{
1da177e4
LT
4733 if (!test_bit(HCI_RAW, &hdev->flags)) {
4734 /* ACL tx timeout must be longer than maximum
4735 * link supervision timeout (40.9 seconds) */
63d2bc1b 4736 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 4737 HCI_ACL_TX_TIMEOUT))
bae1f5d9 4738 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 4739 }
63d2bc1b 4740}
1da177e4 4741
6039aa73 4742static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
4743{
4744 unsigned int cnt = hdev->acl_cnt;
4745 struct hci_chan *chan;
4746 struct sk_buff *skb;
4747 int quote;
4748
4749 __check_timeout(hdev, cnt);
04837f64 4750
73d80deb 4751 while (hdev->acl_cnt &&
a8c5fb1a 4752 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
4753 u32 priority = (skb_peek(&chan->data_q))->priority;
4754 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4755 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4756 skb->len, skb->priority);
73d80deb 4757
ec1cce24
LAD
4758 /* Stop if priority has changed */
4759 if (skb->priority < priority)
4760 break;
4761
4762 skb = skb_dequeue(&chan->data_q);
4763
73d80deb 4764 hci_conn_enter_active_mode(chan->conn,
04124681 4765 bt_cb(skb)->force_active);
04837f64 4766
57d17d70 4767 hci_send_frame(hdev, skb);
1da177e4
LT
4768 hdev->acl_last_tx = jiffies;
4769
4770 hdev->acl_cnt--;
73d80deb
LAD
4771 chan->sent++;
4772 chan->conn->sent++;
1da177e4
LT
4773 }
4774 }
02b20f0b
LAD
4775
4776 if (cnt != hdev->acl_cnt)
4777 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
4778}
4779
6039aa73 4780static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 4781{
63d2bc1b 4782 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
4783 struct hci_chan *chan;
4784 struct sk_buff *skb;
4785 int quote;
bd1eb66b 4786 u8 type;
b71d385a 4787
63d2bc1b 4788 __check_timeout(hdev, cnt);
b71d385a 4789
bd1eb66b
AE
4790 BT_DBG("%s", hdev->name);
4791
4792 if (hdev->dev_type == HCI_AMP)
4793 type = AMP_LINK;
4794 else
4795 type = ACL_LINK;
4796
b71d385a 4797 while (hdev->block_cnt > 0 &&
bd1eb66b 4798 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
4799 u32 priority = (skb_peek(&chan->data_q))->priority;
4800 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4801 int blocks;
4802
4803 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4804 skb->len, skb->priority);
b71d385a
AE
4805
4806 /* Stop if priority has changed */
4807 if (skb->priority < priority)
4808 break;
4809
4810 skb = skb_dequeue(&chan->data_q);
4811
4812 blocks = __get_blocks(hdev, skb);
4813 if (blocks > hdev->block_cnt)
4814 return;
4815
4816 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 4817 bt_cb(skb)->force_active);
b71d385a 4818
57d17d70 4819 hci_send_frame(hdev, skb);
b71d385a
AE
4820 hdev->acl_last_tx = jiffies;
4821
4822 hdev->block_cnt -= blocks;
4823 quote -= blocks;
4824
4825 chan->sent += blocks;
4826 chan->conn->sent += blocks;
4827 }
4828 }
4829
4830 if (cnt != hdev->block_cnt)
bd1eb66b 4831 hci_prio_recalculate(hdev, type);
b71d385a
AE
4832}
4833
6039aa73 4834static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
4835{
4836 BT_DBG("%s", hdev->name);
4837
bd1eb66b
AE
4838 /* No ACL link over BR/EDR controller */
4839 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4840 return;
4841
4842 /* No AMP link over AMP controller */
4843 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
4844 return;
4845
4846 switch (hdev->flow_ctl_mode) {
4847 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4848 hci_sched_acl_pkt(hdev);
4849 break;
4850
4851 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4852 hci_sched_acl_blk(hdev);
4853 break;
4854 }
4855}
4856
1da177e4 4857/* Schedule SCO */
6039aa73 4858static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
4859{
4860 struct hci_conn *conn;
4861 struct sk_buff *skb;
4862 int quote;
4863
4864 BT_DBG("%s", hdev->name);
4865
52087a79
LAD
4866 if (!hci_conn_num(hdev, SCO_LINK))
4867 return;
4868
1da177e4
LT
4869 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4870 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4871 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4872 hci_send_frame(hdev, skb);
1da177e4
LT
4873
4874 conn->sent++;
4875 if (conn->sent == ~0)
4876 conn->sent = 0;
4877 }
4878 }
4879}
4880
6039aa73 4881static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
4882{
4883 struct hci_conn *conn;
4884 struct sk_buff *skb;
4885 int quote;
4886
4887 BT_DBG("%s", hdev->name);
4888
52087a79
LAD
4889 if (!hci_conn_num(hdev, ESCO_LINK))
4890 return;
4891
8fc9ced3
GP
4892 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4893 &quote))) {
b6a0dc82
MH
4894 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4895 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4896 hci_send_frame(hdev, skb);
b6a0dc82
MH
4897
4898 conn->sent++;
4899 if (conn->sent == ~0)
4900 conn->sent = 0;
4901 }
4902 }
4903}
4904
6039aa73 4905static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 4906{
73d80deb 4907 struct hci_chan *chan;
6ed58ec5 4908 struct sk_buff *skb;
02b20f0b 4909 int quote, cnt, tmp;
6ed58ec5
VT
4910
4911 BT_DBG("%s", hdev->name);
4912
52087a79
LAD
4913 if (!hci_conn_num(hdev, LE_LINK))
4914 return;
4915
6ed58ec5
VT
4916 if (!test_bit(HCI_RAW, &hdev->flags)) {
4917 /* LE tx timeout must be longer than maximum
4918 * link supervision timeout (40.9 seconds) */
bae1f5d9 4919 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 4920 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 4921 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
4922 }
4923
4924 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 4925 tmp = cnt;
73d80deb 4926 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
4927 u32 priority = (skb_peek(&chan->data_q))->priority;
4928 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4929 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4930 skb->len, skb->priority);
6ed58ec5 4931
ec1cce24
LAD
4932 /* Stop if priority has changed */
4933 if (skb->priority < priority)
4934 break;
4935
4936 skb = skb_dequeue(&chan->data_q);
4937
57d17d70 4938 hci_send_frame(hdev, skb);
6ed58ec5
VT
4939 hdev->le_last_tx = jiffies;
4940
4941 cnt--;
73d80deb
LAD
4942 chan->sent++;
4943 chan->conn->sent++;
6ed58ec5
VT
4944 }
4945 }
73d80deb 4946
6ed58ec5
VT
4947 if (hdev->le_pkts)
4948 hdev->le_cnt = cnt;
4949 else
4950 hdev->acl_cnt = cnt;
02b20f0b
LAD
4951
4952 if (cnt != tmp)
4953 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
4954}
4955
3eff45ea 4956static void hci_tx_work(struct work_struct *work)
1da177e4 4957{
3eff45ea 4958 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
4959 struct sk_buff *skb;
4960
6ed58ec5 4961 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 4962 hdev->sco_cnt, hdev->le_cnt);
1da177e4 4963
52de599e
MH
4964 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4965 /* Schedule queues and send stuff to HCI driver */
4966 hci_sched_acl(hdev);
4967 hci_sched_sco(hdev);
4968 hci_sched_esco(hdev);
4969 hci_sched_le(hdev);
4970 }
6ed58ec5 4971
1da177e4
LT
4972 /* Send next queued raw (unknown type) packet */
4973 while ((skb = skb_dequeue(&hdev->raw_q)))
57d17d70 4974 hci_send_frame(hdev, skb);
1da177e4
LT
4975}
4976
25985edc 4977/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
4978
4979/* ACL data packet */
6039aa73 4980static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
4981{
4982 struct hci_acl_hdr *hdr = (void *) skb->data;
4983 struct hci_conn *conn;
4984 __u16 handle, flags;
4985
4986 skb_pull(skb, HCI_ACL_HDR_SIZE);
4987
4988 handle = __le16_to_cpu(hdr->handle);
4989 flags = hci_flags(handle);
4990 handle = hci_handle(handle);
4991
f0e09510 4992 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 4993 handle, flags);
1da177e4
LT
4994
4995 hdev->stat.acl_rx++;
4996
4997 hci_dev_lock(hdev);
4998 conn = hci_conn_hash_lookup_handle(hdev, handle);
4999 hci_dev_unlock(hdev);
8e87d142 5000
1da177e4 5001 if (conn) {
65983fc7 5002 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 5003
1da177e4 5004 /* Send to upper protocol */
686ebf28
UF
5005 l2cap_recv_acldata(conn, skb, flags);
5006 return;
1da177e4 5007 } else {
8e87d142 5008 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 5009 hdev->name, handle);
1da177e4
LT
5010 }
5011
5012 kfree_skb(skb);
5013}
5014
5015/* SCO data packet */
6039aa73 5016static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
5017{
5018 struct hci_sco_hdr *hdr = (void *) skb->data;
5019 struct hci_conn *conn;
5020 __u16 handle;
5021
5022 skb_pull(skb, HCI_SCO_HDR_SIZE);
5023
5024 handle = __le16_to_cpu(hdr->handle);
5025
f0e09510 5026 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
5027
5028 hdev->stat.sco_rx++;
5029
5030 hci_dev_lock(hdev);
5031 conn = hci_conn_hash_lookup_handle(hdev, handle);
5032 hci_dev_unlock(hdev);
5033
5034 if (conn) {
1da177e4 5035 /* Send to upper protocol */
686ebf28
UF
5036 sco_recv_scodata(conn, skb);
5037 return;
1da177e4 5038 } else {
8e87d142 5039 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 5040 hdev->name, handle);
1da177e4
LT
5041 }
5042
5043 kfree_skb(skb);
5044}
5045
9238f36a
JH
5046static bool hci_req_is_complete(struct hci_dev *hdev)
5047{
5048 struct sk_buff *skb;
5049
5050 skb = skb_peek(&hdev->cmd_q);
5051 if (!skb)
5052 return true;
5053
5054 return bt_cb(skb)->req.start;
5055}
5056
42c6b129
JH
5057static void hci_resend_last(struct hci_dev *hdev)
5058{
5059 struct hci_command_hdr *sent;
5060 struct sk_buff *skb;
5061 u16 opcode;
5062
5063 if (!hdev->sent_cmd)
5064 return;
5065
5066 sent = (void *) hdev->sent_cmd->data;
5067 opcode = __le16_to_cpu(sent->opcode);
5068 if (opcode == HCI_OP_RESET)
5069 return;
5070
5071 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5072 if (!skb)
5073 return;
5074
5075 skb_queue_head(&hdev->cmd_q, skb);
5076 queue_work(hdev->workqueue, &hdev->cmd_work);
5077}
5078
9238f36a
JH
5079void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5080{
5081 hci_req_complete_t req_complete = NULL;
5082 struct sk_buff *skb;
5083 unsigned long flags;
5084
5085 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5086
42c6b129
JH
5087 /* If the completed command doesn't match the last one that was
5088 * sent we need to do special handling of it.
9238f36a 5089 */
42c6b129
JH
5090 if (!hci_sent_cmd_data(hdev, opcode)) {
5091 /* Some CSR based controllers generate a spontaneous
5092 * reset complete event during init and any pending
5093 * command will never be completed. In such a case we
5094 * need to resend whatever was the last sent
5095 * command.
5096 */
5097 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5098 hci_resend_last(hdev);
5099
9238f36a 5100 return;
42c6b129 5101 }
9238f36a
JH
5102
5103 /* If the command succeeded and there's still more commands in
5104 * this request the request is not yet complete.
5105 */
5106 if (!status && !hci_req_is_complete(hdev))
5107 return;
5108
5109 /* If this was the last command in a request the complete
5110 * callback would be found in hdev->sent_cmd instead of the
5111 * command queue (hdev->cmd_q).
5112 */
5113 if (hdev->sent_cmd) {
5114 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
53e21fbc
JH
5115
5116 if (req_complete) {
5117 /* We must set the complete callback to NULL to
5118 * avoid calling the callback more than once if
5119 * this function gets called again.
5120 */
5121 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5122
9238f36a 5123 goto call_complete;
53e21fbc 5124 }
9238f36a
JH
5125 }
5126
5127 /* Remove all pending commands belonging to this request */
5128 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5129 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5130 if (bt_cb(skb)->req.start) {
5131 __skb_queue_head(&hdev->cmd_q, skb);
5132 break;
5133 }
5134
5135 req_complete = bt_cb(skb)->req.complete;
5136 kfree_skb(skb);
5137 }
5138 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5139
5140call_complete:
5141 if (req_complete)
5142 req_complete(hdev, status);
5143}
5144
b78752cc 5145static void hci_rx_work(struct work_struct *work)
1da177e4 5146{
b78752cc 5147 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
5148 struct sk_buff *skb;
5149
5150 BT_DBG("%s", hdev->name);
5151
1da177e4 5152 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
5153 /* Send copy to monitor */
5154 hci_send_to_monitor(hdev, skb);
5155
1da177e4
LT
5156 if (atomic_read(&hdev->promisc)) {
5157 /* Send copy to the sockets */
470fe1b5 5158 hci_send_to_sock(hdev, skb);
1da177e4
LT
5159 }
5160
0736cfa8
MH
5161 if (test_bit(HCI_RAW, &hdev->flags) ||
5162 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1da177e4
LT
5163 kfree_skb(skb);
5164 continue;
5165 }
5166
5167 if (test_bit(HCI_INIT, &hdev->flags)) {
5168			/* Don't process data packets in these states. */
0d48d939 5169 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
5170 case HCI_ACLDATA_PKT:
5171 case HCI_SCODATA_PKT:
5172 kfree_skb(skb);
5173 continue;
3ff50b79 5174 }
1da177e4
LT
5175 }
5176
5177 /* Process frame */
0d48d939 5178 switch (bt_cb(skb)->pkt_type) {
1da177e4 5179 case HCI_EVENT_PKT:
b78752cc 5180 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
5181 hci_event_packet(hdev, skb);
5182 break;
5183
5184 case HCI_ACLDATA_PKT:
5185 BT_DBG("%s ACL data packet", hdev->name);
5186 hci_acldata_packet(hdev, skb);
5187 break;
5188
5189 case HCI_SCODATA_PKT:
5190 BT_DBG("%s SCO data packet", hdev->name);
5191 hci_scodata_packet(hdev, skb);
5192 break;
5193
5194 default:
5195 kfree_skb(skb);
5196 break;
5197 }
5198 }
1da177e4
LT
5199}
5200
c347b765 5201static void hci_cmd_work(struct work_struct *work)
1da177e4 5202{
c347b765 5203 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
5204 struct sk_buff *skb;
5205
2104786b
AE
5206 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5207 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 5208
1da177e4 5209 /* Send queued commands */
5a08ecce
AE
5210 if (atomic_read(&hdev->cmd_cnt)) {
5211 skb = skb_dequeue(&hdev->cmd_q);
5212 if (!skb)
5213 return;
5214
7585b97a 5215 kfree_skb(hdev->sent_cmd);
1da177e4 5216
a675d7f1 5217 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 5218 if (hdev->sent_cmd) {
1da177e4 5219 atomic_dec(&hdev->cmd_cnt);
57d17d70 5220 hci_send_frame(hdev, skb);
7bdb8a5c
SJ
5221 if (test_bit(HCI_RESET, &hdev->flags))
5222 del_timer(&hdev->cmd_timer);
5223 else
5224 mod_timer(&hdev->cmd_timer,
5f246e89 5225 jiffies + HCI_CMD_TIMEOUT);
1da177e4
LT
5226 } else {
5227 skb_queue_head(&hdev->cmd_q, skb);
c347b765 5228 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
5229 }
5230 }
5231}
b1efcc28
AG
5232
5233void hci_req_add_le_scan_disable(struct hci_request *req)
5234{
5235 struct hci_cp_le_set_scan_enable cp;
5236
5237 memset(&cp, 0, sizeof(cp));
5238 cp.enable = LE_SCAN_DISABLE;
5239 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5240}
a4790dbd 5241
8ef30fd3
AG
5242void hci_req_add_le_passive_scan(struct hci_request *req)
5243{
5244 struct hci_cp_le_set_scan_param param_cp;
5245 struct hci_cp_le_set_scan_enable enable_cp;
5246 struct hci_dev *hdev = req->hdev;
5247 u8 own_addr_type;
5248
5249 /* Set require_privacy to true to avoid identification from
5250 * unknown peer devices. Since this is passive scanning, no
5251 * SCAN_REQ using the local identity should be sent. Mandating
5252 * privacy is just an extra precaution.
5253 */
5254 if (hci_update_random_address(req, true, &own_addr_type))
5255 return;
5256
5257 memset(&param_cp, 0, sizeof(param_cp));
5258 param_cp.type = LE_SCAN_PASSIVE;
5259 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5260 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5261 param_cp.own_address_type = own_addr_type;
5262 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5263 &param_cp);
5264
5265 memset(&enable_cp, 0, sizeof(enable_cp));
5266 enable_cp.enable = LE_SCAN_ENABLE;
4340a124 5267 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
8ef30fd3
AG
5268 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5269 &enable_cp);
5270}
5271
a4790dbd
AG
5272static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5273{
5274 if (status)
5275 BT_DBG("HCI request failed to update background scanning: "
5276 "status 0x%2.2x", status);
5277}
5278
5279/* This function controls the background scanning based on hdev->pend_le_conns
5280 * list. If there are pending LE connections we start the background scanning,
5281 * otherwise we stop it.
5282 *
5283 * This function requires that the caller hold hdev->lock.
5284 */
5285void hci_update_background_scan(struct hci_dev *hdev)
5286{
a4790dbd
AG
5287 struct hci_request req;
5288 struct hci_conn *conn;
5289 int err;
5290
5291 hci_req_init(&req, hdev);
5292
5293 if (list_empty(&hdev->pend_le_conns)) {
5294		/* If there are no pending LE connections, we should stop
5295 * the background scanning.
5296 */
5297
5298 /* If controller is not scanning we are done. */
5299 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5300 return;
5301
5302 hci_req_add_le_scan_disable(&req);
5303
5304 BT_DBG("%s stopping background scanning", hdev->name);
5305 } else {
a4790dbd
AG
5306 /* If there is at least one pending LE connection, we should
5307 * keep the background scan running.
5308 */
5309
a4790dbd
AG
5310 /* If controller is connecting, we should not start scanning
5311 * since some controllers are not able to scan and connect at
5312 * the same time.
5313 */
5314 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5315 if (conn)
5316 return;
5317
4340a124
AG
5318 /* If controller is currently scanning, we stop it to ensure we
5319 * don't miss any advertising (due to duplicates filter).
5320 */
5321 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5322 hci_req_add_le_scan_disable(&req);
5323
8ef30fd3 5324 hci_req_add_le_passive_scan(&req);
a4790dbd
AG
5325
5326 BT_DBG("%s starting background scanning", hdev->name);
5327 }
5328
5329 err = hci_req_run(&req, update_background_scan_complete);
5330 if (err)
5331 BT_ERR("Failed to run HCI request: err %d", err);
5332}
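/* Illustrative sketch (not part of the original file): a typical call
 * site, taking hdev->lock as required. The wrapper name is
 * hypothetical.
 */
static void example_pend_le_conn_changed(struct hci_dev *hdev)
{
	hci_dev_lock(hdev);
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}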